Diffstat (limited to 'drivers'): 469 files changed, 9207 insertions, 6030 deletions
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 10975bb603fb..a35dd0e41c27 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -536,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 static struct acpi_table_header *acpi_get_pptt(void)
 {
 	static struct acpi_table_header *pptt;
+	static bool is_pptt_checked;
 	acpi_status status;
 
 	/*
 	 * PPTT will be used at runtime on every CPU hotplug in path, so we
 	 * don't need to call acpi_put_table() to release the table mapping.
 	 */
-	if (!pptt) {
+	if (!pptt && !is_pptt_checked) {
 		status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
 		if (ACPI_FAILURE(status))
 			acpi_pptt_warn_missing();
+
+		is_pptt_checked = true;
 	}
 
 	return pptt;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 1278969eec1f..4bd16b3f0781 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -263,6 +263,12 @@ static int __init acpi_processor_driver_init(void)
 	if (acpi_disabled)
 		return 0;
 
+	if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
+				       CPUFREQ_POLICY_NOTIFIER)) {
+		acpi_processor_cpufreq_init = true;
+		acpi_processor_ignore_ppc_init();
+	}
+
 	result = driver_register(&acpi_processor_driver);
 	if (result < 0)
 		return result;
@@ -276,12 +282,6 @@ static int __init acpi_processor_driver_init(void)
 	cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
 				  NULL, acpi_soft_cpu_dead);
 
-	if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
-				       CPUFREQ_POLICY_NOTIFIER)) {
-		acpi_processor_cpufreq_init = true;
-		acpi_processor_ignore_ppc_init();
-	}
-
 	acpi_processor_throttling_init();
 	return 0;
 err:
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e534fd49a67e..b7c6287eccca 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -140,9 +140,13 @@ void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 		ret = freq_qos_add_request(&policy->constraints,
 					   &pr->thermal_req,
 					   FREQ_QOS_MAX, INT_MAX);
-		if (ret < 0)
+		if (ret < 0) {
 			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
 			       cpu, ret);
+			continue;
+		}
+
+		thermal_cooling_device_update(pr->cdev);
 	}
 }
 
@@ -153,8 +157,12 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 	for_each_cpu(cpu, policy->related_cpus) {
 		struct acpi_processor *pr = per_cpu(processors, cpu);
 
-		if (pr)
-			freq_qos_remove_request(&pr->thermal_req);
+		if (!pr)
+			continue;
+
+		freq_qos_remove_request(&pr->thermal_req);
+
+		thermal_cooling_device_update(pr->cdev);
 	}
 }
 #else /* !CONFIG_CPU_FREQ */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7c9125df5a65..7b4801ce62d6 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "M17T"),
 		},
 	},
+	{
+		.ident = "MEDION S17413",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+			DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 710ac640267d..fd7cbce8076e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -497,6 +497,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	},
 	{
 	 .callback = video_detect_force_native,
+	 /* Acer Aspire 3830TG */
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
+		},
+	},
+	{
+	 .callback = video_detect_force_native,
 	 /* Acer Aspire 4810T */
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -716,6 +724,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
 		},
 	},
+	{
+	 .callback = video_detect_force_native,
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
+		},
+	},
 
 	/*
 	 * Desktops which falsely report a backlight and which our heuristics
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index e45285d4e62a..da5727069d85 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -251,6 +251,7 @@ bool force_storage_d3(void)
 #define ACPI_QUIRK_UART1_TTY_UART2_SKIP		BIT(1)
 #define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY	BIT(2)
 #define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY	BIT(3)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS	BIT(4)
 
 static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
 	/*
@@ -280,13 +281,35 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
 	 */
 #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
 	{
+		/* Acer Iconia One 7 B1-750 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
+		},
+		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+	},
+	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
 		},
 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
 					ACPI_QUIRK_UART1_TTY_UART2_SKIP |
-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+	},
+	{
+		/* Lenovo Yoga Book X90F/L */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+		},
+		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
 	},
 	{
 		.matches = {
@@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
 		},
 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
+					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
 	},
 	{
 		/* Lenovo Yoga Tablet 2 1050F/L */
@@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
 		},
 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
+
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Whitelabel (sold as various brands) TM800A550L */ @@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s return 0; } EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration); + +bool acpi_quirk_skip_gpio_event_handlers(void) +{ + const struct dmi_system_id *dmi_id; + long quirks; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (!dmi_id) + return false; + + quirks = (unsigned long)dmi_id->driver_data; + return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS); +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers); #endif /* Lists of PMIC ACPI HIDs with an (often better) native charger driver */ diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index 294a266a0dda..c1576d943b43 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -381,6 +381,7 @@ static void pata_parport_dev_release(struct device *dev) { struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev); + ida_free(&pata_parport_bus_dev_ids, dev->id); kfree(pi); } @@ -433,23 +434,27 @@ static struct pi_adapter *pi_init_one(struct parport *parport, if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev)) return NULL; + id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL); + if (id < 0) + return NULL; + pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL); - if (!pi) + if (!pi) { + ida_free(&pata_parport_bus_dev_ids, id); return NULL; + } /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */ pi->dev.parent = &pata_parport_bus; pi->dev.bus = &pata_parport_bus_type; pi->dev.driver = &pr->driver; pi->dev.release = pata_parport_dev_release; - id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL); - if (id < 0) - return NULL; /* pata_parport_dev_release will do kfree(pi) */ pi->dev.id = id; dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id); if (device_register(&pi->dev)) { put_device(&pi->dev); - goto out_ida_free; + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ + return NULL; } pi->proto = pr; @@ -464,8 +469,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport, pi->port = parport->base; par_cb.private = pi; - pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, - pi->dev.id); + pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id); if (!pi->pardev) goto out_module_put; @@ -487,12 +491,13 @@ static struct pi_adapter *pi_init_one(struct parport *parport, pi_connect(pi); if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht)) - goto out_unreg_parport; + goto out_disconnect; return pi; -out_unreg_parport: +out_disconnect: pi_disconnect(pi); +out_unreg_parport: parport_unregister_device(pi->pardev); if (pi->proto->release_proto) pi->proto->release_proto(pi); @@ -500,8 +505,7 @@ out_module_put: module_put(pi->proto->owner); out_unreg_dev: device_unregister(&pi->dev); -out_ida_free: - ida_free(&pata_parport_bus_dev_ids, pi->dev.id); + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ return NULL; } @@ -626,8 +630,7 @@ static void pi_remove_one(struct device *dev) pi_disconnect(pi); pi_release(pi); device_unregister(dev); - ida_free(&pata_parport_bus_dev_ids, dev->id); - /* pata_parport_dev_release will do kfree(pi) */ + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ } static ssize_t delete_device_store(struct bus_type *bus, const char *buf, @@ -643,6 
+646,7 @@ static ssize_t delete_device_store(struct bus_type *bus, const char *buf, } pi_remove_one(dev); + put_device(dev); mutex_unlock(&pi_mutex); return count; diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index eec0cc2144e0..e327a0229dc1 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card) recycle_rx_pool_skb(card, &vc->rcv.rx_pool); } + kfree(vc); } } } @@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card) return 0; } +static void +close_card_ubr0(struct idt77252_dev *card) +{ + struct vc_map *vc = card->vcs[0]; + + free_scq(card, vc->scq); + kfree(vc); +} + static int idt77252_dev_open(struct idt77252_dev *card) { @@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev) struct idt77252_dev *card = dev->dev_data; u32 conf; + close_card_ubr0(card); close_card_oam(card); conf = SAR_CFG_RXPTH | /* enable receive path */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 839373451c2b..28eb59fd71ca 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { + struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; + struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; struct request *rq = blk_mq_rq_from_pdu(cmd); const bool write = op_is_write(req_op(rq)); struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; + const bool use_aio = cmd->use_aio; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; goto failed; } - if (cmd->blkcg_css) - kthread_associate_blkcg(cmd->blkcg_css); - if (cmd->memcg_css) + if (cmd_blkcg_css) + kthread_associate_blkcg(cmd_blkcg_css); + if (cmd_memcg_css) old_memcg = set_active_memcg( - mem_cgroup_from_css(cmd->memcg_css)); + mem_cgroup_from_css(cmd_memcg_css)); + /* + * do_req_filebacked() may call blk_mq_complete_request() synchronously + * or asynchronously if using aio. Hence, do not touch 'cmd' after + * do_req_filebacked() has returned unless we are sure that 'cmd' has + * not yet been completed. 
+ */ ret = do_req_filebacked(lo, rq); - if (cmd->blkcg_css) + if (cmd_blkcg_css) kthread_associate_blkcg(NULL); - if (cmd->memcg_css) { + if (cmd_memcg_css) { set_active_memcg(old_memcg); - css_put(cmd->memcg_css); + css_put(cmd_memcg_css); } failed: /* complete non-aio request */ - if (!cmd->use_aio || ret) { + if (!use_aio || ret) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 4c601ca9552a..9e6b032c8ecc 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (cmd->nq->dev->queue_mode) { case NULL_Q_MQ: - if (likely(!blk_should_fake_timeout(cmd->rq->q))) - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq); break; case NULL_Q_BIO: /* @@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq) } static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) + const struct blk_mq_queue_data *bd) { - struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + struct request *rq = bd->rq; + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_queue *nq = hctx->driver_data; - sector_t nr_sectors = blk_rq_sectors(bd->rq); - sector_t sector = blk_rq_pos(bd->rq); + sector_t nr_sectors = blk_rq_sectors(rq); + sector_t sector = blk_rq_pos(rq); const bool is_poll = hctx->type == HCTX_TYPE_POLL; might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); @@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cmd->timer.function = null_cmd_timer_expired; } - cmd->rq = bd->rq; + cmd->rq = rq; cmd->error = BLK_STS_OK; cmd->nq = nq; - cmd->fake_timeout = should_timeout_request(bd->rq); + cmd->fake_timeout = should_timeout_request(rq) || + blk_should_fake_timeout(rq->q); - blk_mq_start_request(bd->rq); + blk_mq_start_request(rq); - if (should_requeue_request(bd->rq)) { + if (should_requeue_request(rq)) { /* * Alternate between hitting the core BUSY path, and the * driver driven requeue path @@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, nq->requeue_selection++; if (nq->requeue_selection & 1) return BLK_STS_RESOURCE; - else { - blk_mq_requeue_request(bd->rq, true); - return BLK_STS_OK; - } + blk_mq_requeue_request(rq, true); + return BLK_STS_OK; } if (is_poll) { spin_lock(&nq->poll_lock); - list_add_tail(&bd->rq->queuelist, &nq->poll_list); + list_add_tail(&rq->queuelist, &nq->poll_list); spin_unlock(&nq->poll_lock); return BLK_STS_OK; } if (cmd->fake_timeout) return BLK_STS_OK; - return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); + return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); } static void cleanup_queue(struct nullb_queue *nq) diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index fb855da971ee..9fa821fa76b0 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) print_version(); hp = mdesc_grab(); + if (!hp) + return -ENODEV; err = -ENODEV; if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index d1d1c8d606c8..c73cc57ec547 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -715,7 +715,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct 
ublk_io *io, } } -static void ubq_complete_io_cmd(struct ublk_io *io, int res) +static void ubq_complete_io_cmd(struct ublk_io *io, int res, + unsigned issue_flags) { /* mark this cmd owned by ublksrv */ io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV; @@ -727,7 +728,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res) io->flags &= ~UBLK_IO_FLAG_ACTIVE; /* tell ublksrv one io request is coming */ - io_uring_cmd_done(io->cmd, res, 0); + io_uring_cmd_done(io->cmd, res, 0, issue_flags); } #define UBLK_REQUEUE_DELAY_MS 3 @@ -744,7 +745,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq, mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0); } -static inline void __ublk_rq_task_work(struct request *req) +static inline void __ublk_rq_task_work(struct request *req, + unsigned issue_flags) { struct ublk_queue *ubq = req->mq_hctx->driver_data; int tag = req->tag; @@ -782,7 +784,7 @@ static inline void __ublk_rq_task_work(struct request *req) pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n", __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags); - ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA); + ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags); return; } /* @@ -820,17 +822,18 @@ static inline void __ublk_rq_task_work(struct request *req) mapped_bytes >> 9; } - ubq_complete_io_cmd(io, UBLK_IO_RES_OK); + ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags); } -static inline void ublk_forward_io_cmds(struct ublk_queue *ubq) +static inline void ublk_forward_io_cmds(struct ublk_queue *ubq, + unsigned issue_flags) { struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); struct ublk_rq_data *data, *tmp; io_cmds = llist_reverse_order(io_cmds); llist_for_each_entry_safe(data, tmp, io_cmds, node) - __ublk_rq_task_work(blk_mq_rq_from_pdu(data)); + __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags); } static inline void ublk_abort_io_cmds(struct ublk_queue *ubq) @@ -842,12 +845,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq) __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data)); } -static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd) +static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags) { struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); struct ublk_queue *ubq = pdu->ubq; - ublk_forward_io_cmds(ubq); + ublk_forward_io_cmds(ubq, issue_flags); } static void ublk_rq_task_work_fn(struct callback_head *work) @@ -856,8 +859,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work) struct ublk_rq_data, work); struct request *req = blk_mq_rq_from_pdu(data); struct ublk_queue *ubq = req->mq_hctx->driver_data; + unsigned issue_flags = IO_URING_F_UNLOCKED; - ublk_forward_io_cmds(ubq); + ublk_forward_io_cmds(ubq, issue_flags); } static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq) @@ -1111,7 +1115,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq) struct ublk_io *io = &ubq->ios[i]; if (io->flags & UBLK_IO_FLAG_ACTIVE) - io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0); + io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, + IO_URING_F_UNLOCKED); } /* all io commands are canceled */ @@ -1351,7 +1356,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) return -EIOCBQUEUED; out: - io_uring_cmd_done(cmd, ret, 0); + io_uring_cmd_done(cmd, ret, 0, issue_flags); pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n", __func__, cmd_op, tag, ret, io->flags); return -EIOCBQUEUED; @@ -1602,17 +1607,18 @@ static int 
ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd) set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); get_device(&ub->cdev_dev); + ub->dev_info.state = UBLK_S_DEV_LIVE; ret = add_disk(disk); if (ret) { /* * Has to drop the reference since ->free_disk won't be * called in case of add_disk failure. */ + ub->dev_info.state = UBLK_S_DEV_DEAD; ublk_put_device(ub); goto out_put_disk; } set_bit(UB_STATE_USED, &ub->state); - ub->dev_info.state = UBLK_S_DEV_LIVE; out_put_disk: if (ret) put_disk(disk); @@ -2233,7 +2239,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, if (ub) ublk_put_device(ub); out: - io_uring_cmd_done(cmd, ret, 0); + io_uring_cmd_done(cmd, ret, 0, issue_flags); pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n", __func__, ret, cmd->cmd_op, header->dev_id, header->queue_id); return -EIOCBQUEUED; diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index bede8b005594..af774688f1c0 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -26,7 +26,14 @@ #define ECDSA_HEADER_LEN 320 #define BTINTEL_PPAG_NAME "PPAG" -#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB" + +/* structure to store the PPAG data read from ACPI table */ +struct btintel_ppag { + u32 domain; + u32 mode; + acpi_status status; + struct hci_dev *hdev; +}; #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { @@ -1295,17 +1302,16 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); if (ACPI_FAILURE(status)) { - bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status)); + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); return status; } - if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer, - strlen(BTINTEL_PPAG_PREFIX))) { + len = strlen(string.pointer); + if (len < strlen(BTINTEL_PPAG_NAME)) { kfree(string.pointer); return AE_OK; } - len = strlen(string.pointer); if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { kfree(string.pointer); return AE_OK; @@ -1314,7 +1320,8 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data status = acpi_evaluate_object(handle, NULL, NULL, &buffer); if (ACPI_FAILURE(status)) { - bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status)); + ppag->status = status; + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); return status; } @@ -1323,8 +1330,9 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { kfree(buffer.pointer); - bt_dev_warn(hdev, "Invalid object type: %d or package count: %d", + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", p->type, p->package.count); + ppag->status = AE_ERROR; return AE_ERROR; } @@ -1335,6 +1343,7 @@ static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data ppag->domain = (u32)p->package.elements[0].integer.value; ppag->mode = (u32)p->package.elements[1].integer.value; + ppag->status = AE_OK; kfree(buffer.pointer); return AE_CTRL_TERMINATE; } @@ -2314,12 +2323,12 @@ error: static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) { - acpi_status status; struct btintel_ppag ppag; struct sk_buff *skb; struct btintel_loc_aware_reg ppag_cmd; + acpi_handle handle; - /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ + /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ switch 
(ver->cnvr_top & 0xFFF) { case 0x504: /* Hrp2 */ case 0x202: /* Jfp2 */ @@ -2327,29 +2336,35 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver return; } + handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); + if (!handle) { + bt_dev_info(hdev, "No support for BT device in ACPI firmware"); + return; + } + memset(&ppag, 0, sizeof(ppag)); ppag.hdev = hdev; - status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, - ACPI_UINT32_MAX, NULL, - btintel_ppag_callback, &ppag, NULL); + ppag.status = AE_NOT_FOUND; + acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, + btintel_ppag_callback, &ppag, NULL); - if (ACPI_FAILURE(status)) { - /* Do not log warning message if ACPI entry is not found */ - if (status == AE_NOT_FOUND) + if (ACPI_FAILURE(ppag.status)) { + if (ppag.status == AE_NOT_FOUND) { + bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); return; - bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status)); + } return; } if (ppag.domain != 0x12) { - bt_dev_warn(hdev, "PPAG-BT Domain disabled"); + bt_dev_warn(hdev, "PPAG-BT: domain is not bluetooth"); return; } /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */ if (!(ppag.mode & BIT(0))) { - bt_dev_dbg(hdev, "PPAG disabled"); + bt_dev_dbg(hdev, "PPAG-BT: disabled"); return; } diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 8e7da877efae..8fdb65b66315 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -137,13 +137,6 @@ struct intel_offload_use_cases { __u8 preset[8]; } __packed; -/* structure to store the PPAG data read from ACPI table */ -struct btintel_ppag { - u32 domain; - u32 mode; - struct hci_dev *hdev; -}; - struct btintel_loc_aware_reg { __le32 mcc; __le32 sel; diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 2acb719e596f..11c7e04bf394 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev) return 0; } +static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + int ret; + + ret = qca_set_bdaddr_rome(hdev, bdaddr); + if (ret) + return ret; + + /* The firmware stops responding for a while after setting the bdaddr, + * causing timeouts for subsequent commands. Sleep a bit to avoid this. 
+ */ + usleep_range(1000, 10000); + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; hdev->setup = btqcomsmd_setup; - hdev->set_bdaddr = qca_set_bdaddr_rome; + hdev->set_bdaddr = btqcomsmd_set_bdaddr; ret = hci_register_dev(hdev); if (ret < 0) diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 795be33f2892..02893600db39 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -354,6 +354,7 @@ static void btsdio_remove(struct sdio_func *func) BT_DBG("func %p", func); + cancel_work_sync(&data->work); if (!data) return; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 18bc94718711..5c536151ef83 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1050,21 +1050,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count) hci_skb_expect(skb) -= len; if (skb->len == HCI_ACL_HDR_SIZE) { - __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); __le16 dlen = hci_acl_hdr(skb)->dlen; - __u8 type; /* Complete ACL header */ hci_skb_expect(skb) = __le16_to_cpu(dlen); - /* Detect if ISO packet has been sent over bulk */ - if (hci_conn_num(data->hdev, ISO_LINK)) { - type = hci_conn_lookup_type(data->hdev, - hci_handle(handle)); - if (type == ISO_LINK) - hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; - } - if (skb_tailroom(skb) < hci_skb_expect(skb)) { kfree_skb(skb); skb = NULL; diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 2a6b4f676458..36d42484142a 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -204,8 +204,8 @@ static int weim_parse_dt(struct platform_device *pdev) const struct of_device_id *of_id = of_match_device(weim_id_table, &pdev->dev); const struct imx_weim_devtype *devtype = of_id->data; + int ret = 0, have_child = 0; struct device_node *child; - int ret, have_child = 0; struct weim_priv *priv; void __iomem *base; u32 reg; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index b6c5bf69a2b2..1eef05bb1f99 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -91,7 +91,7 @@ config COMMON_CLK_RK808 config COMMON_CLK_HI655X tristate "Clock driver for Hi655x" if EXPERT depends on (MFD_HI655X_PMIC || COMPILE_TEST) - depends on REGMAP + select REGMAP default MFD_HI655X_PMIC help This driver supports the hi655x PMIC clock. 
This diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c index 290a2846a86b..0fafa5cba442 100644 --- a/drivers/clk/bcm/clk-bcm2835-aux.c +++ b/drivers/clk/bcm/clk-bcm2835-aux.c @@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index e74fe6219d14..8dc476ef5bf9 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_DESCRIPTION("BCM2835 clock driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c index 5225d17d6b3f..8609fca29cc4 100644 --- a/drivers/clk/clk-fixed-mmio.c +++ b/drivers/clk/clk-fixed-mmio.c @@ -99,4 +99,3 @@ module_platform_driver(of_fixed_mmio_clk_driver); MODULE_AUTHOR("Jan Kotas <jank@cadence.com>"); MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c index 6238fcea0467..ee5baf993ff2 100644 --- a/drivers/clk/clk-fsl-sai.c +++ b/drivers/clk/clk-fsl-sai.c @@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver); MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver"); MODULE_AUTHOR("Michael Walle <michael@walle.cc>"); -MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:fsl-sai-clk"); diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c index 67a7cb3503c3..4eed667eddaf 100644 --- a/drivers/clk/clk-k210.c +++ b/drivers/clk/clk-k210.c @@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw, f = FIELD_GET(K210_PLL_CLKF, reg) + 1; od = FIELD_GET(K210_PLL_CLKOD, reg) + 1; - return (u64)parent_rate * f / (r * od); + return div_u64((u64)parent_rate * f, r * od); } static const struct clk_ops k210_pll_ops = { diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c index 9ea1a80acbe8..8036bd8cbb0a 100644 --- a/drivers/clk/hisilicon/clk-hi3559a.c +++ b/drivers/clk/hisilicon/clk-hi3559a.c @@ -841,5 +841,4 @@ static void __exit hi3559av100_crg_exit(void) module_exit(hi3559av100_crg_exit); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver"); diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c index 0ddc73e07be4..bce61c45e967 100644 --- a/drivers/clk/microchip/clk-mpfs-ccc.c +++ b/drivers/clk/microchip/clk-mpfs-ccc.c @@ -291,4 +291,3 @@ module_exit(clk_ccc_exit); MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver"); MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index 6ad2954948a5..11316c3b14ca 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -106,7 +106,8 @@ static void psci_pd_remove(void) struct psci_pd_provider *pd_provider, *it; struct generic_pm_domain *genpd; - list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) { + list_for_each_entry_safe_reverse(pd_provider, it, + &psci_pd_providers, link) { of_genpd_del_provider(pd_provider->node); genpd = of_genpd_remove_last(pd_provider->node); diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 
5c8a7084577b..9b3ce8948351 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -123,12 +123,23 @@ static void dma_fence_array_release(struct dma_fence *fence) dma_fence_free(fence); } +static void dma_fence_array_set_deadline(struct dma_fence *fence, + ktime_t deadline) +{ + struct dma_fence_array *array = to_dma_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + dma_fence_set_deadline(array->fences[i], deadline); +} + const struct dma_fence_ops dma_fence_array_ops = { .get_driver_name = dma_fence_array_get_driver_name, .get_timeline_name = dma_fence_array_get_timeline_name, .enable_signaling = dma_fence_array_enable_signaling, .signaled = dma_fence_array_signaled, .release = dma_fence_array_release, + .set_deadline = dma_fence_array_set_deadline, }; EXPORT_SYMBOL(dma_fence_array_ops); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index a0d920576ba6..9663ba1bb6ac 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -206,6 +206,17 @@ static void dma_fence_chain_release(struct dma_fence *fence) dma_fence_free(fence); } + +static void dma_fence_chain_set_deadline(struct dma_fence *fence, + ktime_t deadline) +{ + dma_fence_chain_for_each(fence, fence) { + struct dma_fence *f = dma_fence_chain_contained(fence); + + dma_fence_set_deadline(f, deadline); + } +} + const struct dma_fence_ops dma_fence_chain_ops = { .use_64bit_seqno = true, .get_driver_name = dma_fence_chain_get_driver_name, @@ -213,6 +224,7 @@ const struct dma_fence_ops dma_fence_chain_ops = { .enable_signaling = dma_fence_chain_enable_signaling, .signaled = dma_fence_chain_signaled, .release = dma_fence_chain_release, + .set_deadline = dma_fence_chain_set_deadline, }; EXPORT_SYMBOL(dma_fence_chain_ops); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 0de0482cd36e..f177c56269bb 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -913,6 +913,65 @@ err_free_cb: EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** + * DOC: deadline hints + * + * In an ideal world, it would be possible to pipeline a workload sufficiently + * that a utilization based device frequency governor could arrive at a minimum + * frequency that meets the requirements of the use-case, in order to minimize + * power consumption. But in the real world there are many workloads which + * defy this ideal. For example, but not limited to: + * + * * Workloads that ping-pong between device and CPU, with alternating periods + * of CPU waiting for device, and device waiting on CPU. This can result in + * devfreq and cpufreq seeing idle time in their respective domains and in + * result reduce frequency. + * + * * Workloads that interact with a periodic time based deadline, such as double + * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, + * missing a vblank deadline results in an *increase* in idle time on the GPU + * (since it has to wait an additional vblank period), sending a signal to + * the GPU's devfreq to reduce frequency, when in fact the opposite is what is + * needed. + * + * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline. + * The deadline hint provides a way for the waiting driver, or userspace, to + * convey an appropriate sense of urgency to the signaling driver. + * + * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace + * facing APIs). 
The time could either be some point in the future (such as + * the vblank based deadline for page-flipping, or the start of a compositor's + * composition cycle), or the current time to indicate an immediate deadline + * hint (Ie. forward progress cannot be made until this fence is signaled). + * + * Multiple deadlines may be set on a given fence, even in parallel. See the + * documentation for &dma_fence_ops.set_deadline. + * + * The deadline hint is just that, a hint. The driver that created the fence + * may react by increasing frequency, making different scheduling choices, etc. + * Or doing nothing at all. + */ + +/** + * dma_fence_set_deadline - set desired fence-wait deadline hint + * @fence: the fence that is to be waited on + * @deadline: the time by which the waiter hopes for the fence to be + * signaled + * + * Give the fence signaler a hint about an upcoming deadline, such as + * vblank, by which point the waiter would prefer the fence to be + * signaled by. This is intended to give feedback to the fence signaler + * to aid in power management decisions, such as boosting GPU frequency + * if a periodic vblank deadline is approaching but the fence is not + * yet signaled.. + */ +void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) +{ + if (fence->ops->set_deadline && !dma_fence_is_signaled(fence)) + fence->ops->set_deadline(fence, deadline); +} +EXPORT_SYMBOL(dma_fence_set_deadline); + +/** * dma_fence_describe - Dump fence describtion into seq_file * @fence: the 6fence to describe * @seq: the seq_file to put the textual description into diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 1c76aed8e262..2a594b754af1 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -684,6 +684,28 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, } EXPORT_SYMBOL_GPL(dma_resv_wait_timeout); +/** + * dma_resv_set_deadline - Set a deadline on reservation's objects fences + * @obj: the reservation object + * @usage: controls which fences to include, see enum dma_resv_usage. + * @deadline: the requested deadline (MONOTONIC) + * + * May be called without holding the dma_resv lock. Sets @deadline on + * all fences filtered by @usage. + */ +void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage, + ktime_t deadline) +{ + struct dma_resv_iter cursor; + struct dma_fence *fence; + + dma_resv_iter_begin(&cursor, obj, usage); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + dma_fence_set_deadline(fence, deadline); + } + dma_resv_iter_end(&cursor); +} +EXPORT_SYMBOL_GPL(dma_resv_set_deadline); /** * dma_resv_test_signaled - Test if a reservation object's fences have been diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 73140b854b31..c15928b8c5cc 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -14,7 +14,6 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> -#include <linux/of.h> #include "common.h" @@ -436,7 +435,7 @@ struct scmi_device *scmi_device_create(struct device_node *np, /* Nothing to do. 
*/ if (!phead) { mutex_unlock(&scmi_requested_devices_mtx); - return scmi_dev; + return NULL; } /* Walk the list of requested devices for protocol and create them */ diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index d21c7eafd641..dbc474ff62b7 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2221,8 +2221,8 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, hash_init(info->pending_xfers); /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */ - info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX), - sizeof(long), GFP_KERNEL); + info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX, + GFP_KERNEL); if (!info->xfer_alloc_table) return -ENOMEM; @@ -2657,6 +2657,7 @@ static int scmi_probe(struct platform_device *pdev) struct scmi_handle *handle; const struct scmi_desc *desc; struct scmi_info *info; + bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX); struct device *dev = &pdev->dev; struct device_node *child, *np = dev->of_node; @@ -2731,16 +2732,13 @@ static int scmi_probe(struct platform_device *pdev) dev_warn(dev, "Failed to setup SCMI debugfs.\n"); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { - bool coex = - IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX); - ret = scmi_debugfs_raw_mode_setup(info); if (!coex) { if (ret) goto clear_dev_req_notifier; - /* Bail out anyway when coex enabled */ - return ret; + /* Bail out anyway when coex disabled. */ + return 0; } /* Coex enabled, carry on in any case. */ @@ -2764,6 +2762,8 @@ static int scmi_probe(struct platform_device *pdev) ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); if (ret) { dev_err(dev, "unable to communicate with SCMI\n"); + if (coex) + return 0; goto notification_exit; } diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 0d9c9538b7f4..112c285deb97 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device_node *of_node, int idx) "#mbox-cells", idx, NULL); } +static int mailbox_chan_validate(struct device *cdev) +{ + int num_mb, num_sh, ret = 0; + struct device_node *np = cdev->of_node; + + num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + num_sh = of_count_phandle_with_args(np, "shmem", NULL); + /* Bail out if mboxes and shmem descriptors are inconsistent */ + if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) { + dev_warn(cdev, "Invalid channel descriptor for '%s'\n", + of_node_full_name(np)); + return -EINVAL; + } + + if (num_sh > 1) { + struct device_node *np_tx, *np_rx; + + np_tx = of_parse_phandle(np, "shmem", 0); + np_rx = of_parse_phandle(np, "shmem", 1); + /* SCMI Tx and Rx shared mem areas have to be distinct */ + if (!np_tx || !np_rx || np_tx == np_rx) { + dev_warn(cdev, "Invalid shmem descriptor for '%s'\n", + of_node_full_name(np)); + ret = -EINVAL; + } + + of_node_put(np_tx); + of_node_put(np_rx); + } + + return ret; +} + static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, resource_size_t size; struct resource res; + ret = mailbox_chan_validate(cdev); + if (ret) + return ret; + smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL); if (!smbox) return -ENOMEM; diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c index f54e6fdf08e2..f80a9af3d16e 
100644 --- a/drivers/firmware/efi/earlycon.c +++ b/drivers/firmware/efi/earlycon.c @@ -215,6 +215,14 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num) } } +static bool __initdata fb_probed; + +void __init efi_earlycon_reprobe(void) +{ + if (fb_probed) + setup_earlycon("efifb"); +} + static int __init efi_earlycon_setup(struct earlycon_device *device, const char *opt) { @@ -222,15 +230,17 @@ static int __init efi_earlycon_setup(struct earlycon_device *device, u16 xres, yres; u32 i; - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + fb_wb = opt && !strcmp(opt, "ram"); + + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) { + fb_probed = true; return -ENODEV; + } fb_base = screen_info.lfb_base; if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) fb_base |= (u64)screen_info.ext_lfb_base << 32; - fb_wb = opt && !strcmp(opt, "ram"); - si = &screen_info; xres = si->lfb_width; yres = si->lfb_height; diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c index 2c16080e1f71..ef0820f1a924 100644 --- a/drivers/firmware/efi/efi-init.c +++ b/drivers/firmware/efi/efi-init.c @@ -72,6 +72,9 @@ static void __init init_screen_info(void) if (memblock_is_map_memory(screen_info.lfb_base)) memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size); + + if (IS_ENABLED(CONFIG_EFI_EARLYCON)) + efi_earlycon_reprobe(); } } diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 43e9a4cab9f5..ccdd6a130d98 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -44,4 +44,4 @@ OBJCOPYFLAGS_vmlinuz.efi := -O binary $(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE $(call if_changed,objcopy) -targets += zboot-header.o vmlinuz.o vmlinuz.efi.elf vmlinuz.efi +targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index d4a6b12a8741..770b8ecb7398 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -85,8 +85,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } } - if (image->image_base != _text) + if (image->image_base != _text) { efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); + image->image_base = _text; + } if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN)) efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n", @@ -139,6 +141,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, *image_addr = *reserve_addr; memcpy((void *)*image_addr, _text, kernel_size); caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize); + efi_remap_image(*image_addr, *reserve_size, kernel_codesize); return EFI_SUCCESS; } diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c index 399770266372..8aad8c49d43f 100644 --- a/drivers/firmware/efi/libstub/arm64.c +++ b/drivers/firmware/efi/libstub/arm64.c @@ -16,20 +16,43 @@ static bool system_needs_vamap(void) { - const u8 *type1_family = efi_get_smbios_string(1, family); + const struct efi_smbios_type4_record *record; + const u32 __aligned(1) *socid; + const u8 *version; /* * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if - * SetVirtualAddressMap() has not been called prior. + * SetVirtualAddressMap() has not been called prior. 
Most Altra systems + * can be identified by the SMCCC soc ID, which is conveniently exposed + * via the type 4 SMBIOS records. Otherwise, test the processor version + * field. eMAG systems all appear to have the processor version field + * set to "eMAG". */ - if (!type1_family || ( - strcmp(type1_family, "eMAG") && - strcmp(type1_family, "Altra") && - strcmp(type1_family, "Altra Max"))) + record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4); + if (!record) return false; - efi_warn("Working around broken SetVirtualAddressMap()\n"); - return true; + socid = (u32 *)record->processor_id; + switch (*socid & 0xffff000f) { + static char const altra[] = "Ampere(TM) Altra(TM) Processor"; + static char const emag[] = "eMAG"; + + default: + version = efi_get_smbios_string(&record->header, 4, + processor_version); + if (!version || (strncmp(version, altra, sizeof(altra) - 1) && + strncmp(version, emag, sizeof(emag) - 1))) + break; + + fallthrough; + + case 0x0a160001: // Altra + case 0x0a160002: // Altra Max + efi_warn("Working around broken SetVirtualAddressMap()\n"); + return true; + } + + return false; } efi_status_t check_platform_features(void) diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c index 5245c4f031c0..cc4dcaea67fa 100644 --- a/drivers/firmware/efi/libstub/efi-stub-entry.c +++ b/drivers/firmware/efi/libstub/efi-stub-entry.c @@ -5,6 +5,15 @@ #include "efistub.h" +static unsigned long screen_info_offset; + +struct screen_info *alloc_screen_info(void) +{ + if (IS_ENABLED(CONFIG_ARM)) + return __alloc_screen_info(); + return (void *)&screen_info + screen_info_offset; +} + /* * EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and * LoongArch. This is the entrypoint that is described in the PE/COFF header @@ -56,6 +65,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle, return status; } + screen_info_offset = image_addr - (unsigned long)image->image_base; + status = efi_stub_common(handle, image, image_addr, cmdline_ptr); efi_free(image_size, image_addr); diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c index 2955c1ac6a36..f9c1e8a2bd1d 100644 --- a/drivers/firmware/efi/libstub/efi-stub.c +++ b/drivers/firmware/efi/libstub/efi-stub.c @@ -47,11 +47,6 @@ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0); -struct screen_info * __weak alloc_screen_info(void) -{ - return &screen_info; -} - void __weak free_screen_info(struct screen_info *si) { } diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 6bd3bb86d967..148013bcb5f8 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1062,6 +1062,7 @@ efi_enable_reset_attack_mitigation(void) { } void efi_retrieve_tpm2_eventlog(void); struct screen_info *alloc_screen_info(void); +struct screen_info *__alloc_screen_info(void); void free_screen_info(struct screen_info *si); void efi_cache_sync_image(unsigned long image_base, @@ -1074,6 +1075,8 @@ struct efi_smbios_record { u16 handle; }; +const struct efi_smbios_record *efi_get_smbios_record(u8 type); + struct efi_smbios_type1_record { struct efi_smbios_record header; @@ -1087,14 +1090,46 @@ struct efi_smbios_type1_record { u8 family; }; -#define efi_get_smbios_string(__type, __name) ({ \ - int size = sizeof(struct efi_smbios_type ## __type ## _record); \ +struct efi_smbios_type4_record { + struct efi_smbios_record 
header; + + u8 socket; + u8 processor_type; + u8 processor_family; + u8 processor_manufacturer; + u8 processor_id[8]; + u8 processor_version; + u8 voltage; + u16 external_clock; + u16 max_speed; + u16 current_speed; + u8 status; + u8 processor_upgrade; + u16 l1_cache_handle; + u16 l2_cache_handle; + u16 l3_cache_handle; + u8 serial_number; + u8 asset_tag; + u8 part_number; + u8 core_count; + u8 enabled_core_count; + u8 thread_count; + u16 processor_characteristics; + u16 processor_family2; + u16 core_count2; + u16 enabled_core_count2; + u16 thread_count2; + u16 thread_enabled; +}; + +#define efi_get_smbios_string(__record, __type, __name) ({ \ int off = offsetof(struct efi_smbios_type ## __type ## _record, \ __name); \ - __efi_get_smbios_string(__type, off, size); \ + __efi_get_smbios_string((__record), __type, off); \ }) -const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize); +const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, + u8 type, int offset); void efi_remap_image(unsigned long image_base, unsigned alloc_size, unsigned long code_size); diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 1692d19ae80f..32c7a54923b4 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -101,6 +101,7 @@ efi_status_t efi_random_alloc(unsigned long size, * to calculate the randomly chosen address, and allocate it directly * using EFI_ALLOCATE_ADDRESS. */ + status = EFI_OUT_OF_RESOURCES; for (map_offset = 0; map_offset < map->map_size; map_offset += map->desc_size) { efi_memory_desc_t *md = (void *)map->map + map_offset; efi_physical_addr_t target; diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c index 8e76a8b384ba..4be1c4d1f922 100644 --- a/drivers/firmware/efi/libstub/screen_info.c +++ b/drivers/firmware/efi/libstub/screen_info.c @@ -15,18 +15,11 @@ * early, but it only works if the EFI stub is part of the core kernel image * itself. The zboot decompressor can only use the configuration table * approach. - * - * In order to support both methods from the same build of the EFI stub - * library, provide this dummy global definition of struct screen_info. If it - * is required to satisfy a link dependency, it means we need to override the - * __weak alloc and free methods with the ones below, and those will be pulled - * in as well. 
*/ -struct screen_info screen_info; static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID; -struct screen_info *alloc_screen_info(void) +struct screen_info *__alloc_screen_info(void) { struct screen_info *si; efi_status_t status; diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c index 460418b7f5f5..c217de2cc8d5 100644 --- a/drivers/firmware/efi/libstub/smbios.c +++ b/drivers/firmware/efi/libstub/smbios.c @@ -22,21 +22,30 @@ struct efi_smbios_protocol { u8 minor_version; }; -const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize) +const struct efi_smbios_record *efi_get_smbios_record(u8 type) { struct efi_smbios_record *record; efi_smbios_protocol_t *smbios; efi_status_t status; u16 handle = 0xfffe; - const u8 *strtable; status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL, (void **)&smbios) ?: efi_call_proto(smbios, get_next, &handle, &type, &record, NULL); if (status != EFI_SUCCESS) return NULL; + return record; +} + +const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, + u8 type, int offset) +{ + const u8 *strtable; + + if (!record) + return NULL; - strtable = (u8 *)record + recsize; + strtable = (u8 *)record + record->length; for (int i = 1; i < ((u8 *)record)[offset]; i++) { int len = strlen(strtable); diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S index ec4525d40e0c..445cb646eaaa 100644 --- a/drivers/firmware/efi/libstub/zboot-header.S +++ b/drivers/firmware/efi/libstub/zboot-header.S @@ -63,7 +63,7 @@ __efistub_efi_zboot_header: .long .Lefi_header_end - .Ldoshdr .long 0 .short IMAGE_SUBSYSTEM_EFI_APPLICATION - .short 0 + .short IMAGE_DLL_CHARACTERISTICS_NX_COMPAT #ifdef CONFIG_64BIT .quad 0, 0, 0, 0 #else diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c index ba234e062a1a..6105e5e2eda4 100644 --- a/drivers/firmware/efi/libstub/zboot.c +++ b/drivers/firmware/efi/libstub/zboot.c @@ -57,6 +57,11 @@ void __weak efi_cache_sync_image(unsigned long image_base, // executable code loaded into memory to be safe for execution. 
} +struct screen_info *alloc_screen_info(void) +{ + return __alloc_screen_info(); +} + asmlinkage efi_status_t __efiapi efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab) { diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index f06fdacc9bc8..456d0e5eaf78 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { "IdeaPad Duet 3 10IGL5"), }, }, + { + /* Lenovo Yoga Book X91F / X91L */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + /* Non exact match to match F + L versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), + }, + }, {}, }; @@ -341,7 +349,7 @@ static const struct fwnode_operations efifb_fwnode_ops = { #ifdef CONFIG_EFI static struct fwnode_handle efifb_fwnode; -__init void sysfb_apply_efi_quirks(struct platform_device *pd) +__init void sysfb_apply_efi_quirks(void) { if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) @@ -355,7 +363,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd) screen_info.lfb_height = temp; screen_info.lfb_linelength = 4 * screen_info.lfb_width; } +} +__init void sysfb_set_efifb_fwnode(struct platform_device *pd) +{ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) { fwnode_init(&efifb_fwnode, &efifb_fwnode_ops); pd->dev.fwnode = &efifb_fwnode; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 468d4d5ab550..b1e11f85b805 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1479,7 +1479,7 @@ static int qcom_scm_probe(struct platform_device *pdev) init_completion(&__scm->waitq_comp); - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { if (irq != -ENXIO) return irq; diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c index 3fd3563d962b..3c197db42c9d 100644 --- a/drivers/firmware/sysfb.c +++ b/drivers/firmware/sysfb.c @@ -81,6 +81,8 @@ static __init int sysfb_init(void) if (disabled) goto unlock_mutex; + sysfb_apply_efi_quirks(); + /* try to create a simple-framebuffer device */ compatible = sysfb_parse_mode(si, &mode); if (compatible) { @@ -107,7 +109,7 @@ static __init int sysfb_init(void) goto unlock_mutex; } - sysfb_apply_efi_quirks(pd); + sysfb_set_efifb_fwnode(pd); ret = platform_device_add_data(pd, si, sizeof(*si)); if (ret) diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c index ce9c007ed66f..82c64cb9f531 100644 --- a/drivers/firmware/sysfb_simplefb.c +++ b/drivers/firmware/sysfb_simplefb.c @@ -141,7 +141,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s if (!pd) return ERR_PTR(-ENOMEM); - sysfb_apply_efi_quirks(pd); + sysfb_set_efifb_fwnode(pd); ret = platform_device_add_resources(pd, &res, 1); if (ret) diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index acd83d29c866..ce86a1850305 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id) } /* Add new entry if not present */ - feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL); + feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC); if (!feature_data) return -ENOMEM; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d8a421ce26a8..31ae0adbb295 100644 --- 
a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + if (acpi_quirk_skip_gpio_event_handlers()) + return; + acpi_walk_resources(handle, METHOD_NAME__AEI, acpi_gpiochip_alloc_event, acpi_gpio); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index d0ef23bfb9e6..0e071fbc9154 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -991,8 +991,5 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - - dc_dmub_srv_p_state_delegate(dc, - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context); } diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index a554c79dcd39..9020bf820bc8 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -100,7 +100,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) { struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); struct platform_device *pdev = to_platform_device(drm->dev); - struct resource *res; u32 version; int ret; @@ -115,8 +114,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) atomic_set(&hdlcd->dma_end_count, 0); #endif - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdlcd->mmio = devm_ioremap_resource(drm->dev, res); + hdlcd->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdlcd->mmio)) { DRM_ERROR("failed to map control registers area\n"); ret = PTR_ERR(hdlcd->mmio); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 0597e6ad56e7..c03cfd57b752 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -724,8 +724,7 @@ static int malidp_bind(struct device *dev) hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev); malidp->dev = hwdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hwdev->regs = devm_ioremap_resource(dev, res); + hwdev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hwdev->regs)) return PTR_ERR(hwdev->regs); diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 12e8f30c65f7..f076a09afac0 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -220,6 +220,18 @@ config DRM_PARADE_PS8640 The PS8640 is a high-performance and low-power MIPI DSI to eDP converter +config DRM_SAMSUNG_DSIM + tristate "Samsung MIPI DSIM bridge driver" + depends on COMMON_CLK + depends on OF && HAS_IOMEM + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select DRM_PANEL_BRIDGE + help + The Samsung MIPI DSIM bridge controller driver. + This MIPI DSIM bridge can be found it on Exynos SoCs and + NXP's i.MX8M Mini/Nano. 
+ config DRM_SIL_SII8620 tristate "Silicon Image SII8620 HDMI/MHL bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 52f6e8b4a821..2b892b7ed59e 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o +obj-$(CONFIG_DRM_SAMSUNG_DSIM) += samsung-dsim.o obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o obj-$(CONFIG_DRM_SII902X) += sii902x.o obj-$(CONFIG_DRM_SII9234) += sii9234.o diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index fdfeadcefe80..7e3e56441aed 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv) enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, const struct drm_display_mode *mode) { - int lanes; + unsigned long max_lane_freq; struct mipi_dsi_device *dsi = adv->dsi; + u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); - if (mode->clock > 80000) - lanes = 4; - else - lanes = 3; - - /* - * TODO: add support for dynamic switching of lanes - * by using the bridge pre_enable() op . Till then filter - * out the modes which shall need different number of lanes - * than what was configured in the device tree. - */ - if (lanes != dsi->lanes) - return MODE_BAD; + /* Check max clock for either 7533 or 7535 */ + if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500)) + return MODE_CLOCK_HIGH; + + /* Check max clock for each lane */ + max_lane_freq = (adv->type == ADV7533 ? 
800000 : 891000); + + if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes) + return MODE_CLOCK_HIGH; return MODE_OK; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index 5dbfc7226b31..f50d65f54314 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -1278,7 +1278,7 @@ err_disable_pclk: return ret; } -static int cdns_dsi_drm_remove(struct platform_device *pdev) +static void cdns_dsi_drm_remove(struct platform_device *pdev) { struct cdns_dsi *dsi = platform_get_drvdata(pdev); @@ -1288,8 +1288,6 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev) dsi->platform_ops->deinit(dsi); pm_runtime_disable(&pdev->dev); - - return 0; } static const struct of_device_id cdns_dsi_of_match[] = { @@ -1303,7 +1301,7 @@ MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, - .remove = cdns_dsi_drm_remove, + .remove_new = cdns_dsi_drm_remove, .driver = { .name = "cdns-dsi", .of_match_table = cdns_dsi_of_match, diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 9a12449ad7b8..56ae511367b1 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -271,12 +271,9 @@ static int display_connector_probe(struct platform_device *pdev) type == DRM_MODE_CONNECTOR_DisplayPort) { conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); - if (IS_ERR(conn->hpd_gpio)) { - if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Unable to retrieve HPD GPIO\n"); - return PTR_ERR(conn->hpd_gpio); - } + if (IS_ERR(conn->hpd_gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(conn->hpd_gpio), + "Unable to retrieve HPD GPIO\n"); conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio); } else { @@ -382,7 +379,7 @@ static int display_connector_probe(struct platform_device *pdev) return 0; } -static int display_connector_remove(struct platform_device *pdev) +static void display_connector_remove(struct platform_device *pdev) { struct display_connector *conn = platform_get_drvdata(pdev); @@ -396,8 +393,6 @@ static int display_connector_remove(struct platform_device *pdev) if (!IS_ERR(conn->bridge.ddc)) i2c_put_adapter(conn->bridge.ddc); - - return 0; } static const struct of_device_id display_connector_match[] = { @@ -426,7 +421,7 @@ MODULE_DEVICE_TABLE(of, display_connector_match); static struct platform_driver display_connector_driver = { .probe = display_connector_probe, - .remove = display_connector_remove, + .remove_new = display_connector_remove, .driver = { .name = "display-connector", .of_match_table = display_connector_match, diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 6bac160b395b..450b352914f4 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -347,13 +347,11 @@ static int fsl_ldb_probe(struct platform_device *pdev) return 0; } -static int fsl_ldb_remove(struct platform_device *pdev) +static void fsl_ldb_remove(struct platform_device *pdev) { struct fsl_ldb *fsl_ldb = platform_get_drvdata(pdev); drm_bridge_remove(&fsl_ldb->bridge); - - return 0; } static const struct of_device_id fsl_ldb_match[] = { @@ -367,7 +365,7 @@ MODULE_DEVICE_TABLE(of, fsl_ldb_match); static struct platform_driver fsl_ldb_driver = { .probe = fsl_ldb_probe, - .remove = fsl_ldb_remove, + .remove_new = fsl_ldb_remove, .driver = { .name = 
"fsl-ldb", .of_match_table = fsl_ldb_match, diff --git a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c index 178af8d2d80b..386032a02599 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c +++ b/drivers/gpu/drm/bridge/imx/imx8qm-ldb-drv.c @@ -532,7 +532,7 @@ static int imx8qm_ldb_probe(struct platform_device *pdev) return ret; } -static int imx8qm_ldb_remove(struct platform_device *pdev) +static void imx8qm_ldb_remove(struct platform_device *pdev) { struct imx8qm_ldb *imx8qm_ldb = platform_get_drvdata(pdev); struct ldb *ldb = &imx8qm_ldb->base; @@ -540,8 +540,6 @@ static int imx8qm_ldb_remove(struct platform_device *pdev) ldb_remove_bridge_helper(ldb); pm_runtime_disable(&pdev->dev); - - return 0; } static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev) @@ -573,7 +571,7 @@ MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids); static struct platform_driver imx8qm_ldb_driver = { .probe = imx8qm_ldb_probe, - .remove = imx8qm_ldb_remove, + .remove_new = imx8qm_ldb_remove, .driver = { .pm = &imx8qm_ldb_pm_ops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c index 63948d5d20fd..c806576b1e22 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb-drv.c @@ -667,7 +667,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) return ret; } -static int imx8qxp_ldb_remove(struct platform_device *pdev) +static void imx8qxp_ldb_remove(struct platform_device *pdev) { struct imx8qxp_ldb *imx8qxp_ldb = platform_get_drvdata(pdev); struct ldb *ldb = &imx8qxp_ldb->base; @@ -675,8 +675,6 @@ static int imx8qxp_ldb_remove(struct platform_device *pdev) ldb_remove_bridge_helper(ldb); pm_runtime_disable(&pdev->dev); - - return 0; } static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev) @@ -708,7 +706,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids); static struct platform_driver imx8qxp_ldb_driver = { .probe = imx8qxp_ldb_probe, - .remove = imx8qxp_ldb_remove, + .remove_new = imx8qxp_ldb_remove, .driver = { .pm = &imx8qxp_ldb_pm_ops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index 503bd8db8afe..d0868a6ac6c9 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -357,7 +357,7 @@ free_child: return ret; } -static int imx8qxp_pc_bridge_remove(struct platform_device *pdev) +static void imx8qxp_pc_bridge_remove(struct platform_device *pdev) { struct imx8qxp_pc *pc = platform_get_drvdata(pdev); struct imx8qxp_pc_channel *ch; @@ -374,8 +374,6 @@ static int imx8qxp_pc_bridge_remove(struct platform_device *pdev) } pm_runtime_disable(&pdev->dev); - - return 0; } static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev) @@ -435,7 +433,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids); static struct platform_driver imx8qxp_pc_bridge_driver = { .probe = imx8qxp_pc_bridge_probe, - .remove = imx8qxp_pc_bridge_remove, + .remove_new = imx8qxp_pc_bridge_remove, .driver = { .pm = &imx8qxp_pc_pm_ops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index 9e5f2b4dc2e5..ed8b7a4e0e11 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -313,7 +313,7 @@ imx8qxp_pixel_link_find_next_bridge(struct 
imx8qxp_pixel_link *pl) } /* specially select the next bridge with companion PXL2DPI */ - if (of_find_property(remote, "fsl,companion-pxl2dpi", NULL)) + if (of_property_present(remote, "fsl,companion-pxl2dpi")) bridge_sel = ep_cnt; ep_cnt++; @@ -398,13 +398,11 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev) return ret; } -static int imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev) +static void imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev) { struct imx8qxp_pixel_link *pl = platform_get_drvdata(pdev); drm_bridge_remove(&pl->bridge); - - return 0; } static const struct of_device_id imx8qxp_pixel_link_dt_ids[] = { @@ -416,7 +414,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids); static struct platform_driver imx8qxp_pixel_link_bridge_driver = { .probe = imx8qxp_pixel_link_bridge_probe, - .remove = imx8qxp_pixel_link_bridge_remove, + .remove_new = imx8qxp_pixel_link_bridge_remove, .driver = { .of_match_table = imx8qxp_pixel_link_dt_ids, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c index d0fec82f0cf8..4a886cb808ca 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c @@ -455,15 +455,13 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev) return ret; } -static int imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev) +static void imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev) { struct imx8qxp_pxl2dpi *p2d = platform_get_drvdata(pdev); drm_bridge_remove(&p2d->bridge); pm_runtime_disable(&pdev->dev); - - return 0; } static const struct of_device_id imx8qxp_pxl2dpi_dt_ids[] = { @@ -474,7 +472,7 @@ MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids); static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = { .probe = imx8qxp_pxl2dpi_bridge_probe, - .remove = imx8qxp_pxl2dpi_bridge_remove, + .remove_new = imx8qxp_pxl2dpi_bridge_remove, .driver = { .of_match_table = imx8qxp_pxl2dpi_dt_ids, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index bc451b2a77c2..abaf6e23775e 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -258,12 +258,12 @@ #define REG_AUD_INFOFRAM_SUM 0xFB /* the following six registers are in bank1 */ -#define REG_DRV_0_DB_800_MV 0x7E -#define REG_PRE_0_DB_800_MV 0x7F -#define REG_PRE_3P5_DB_800_MV 0x81 -#define REG_SSC_CTRL0 0x88 -#define REG_SSC_CTRL1 0x89 -#define REG_SSC_CTRL2 0x8A +#define REG_DRV_0_DB_800_MV 0x17E +#define REG_PRE_0_DB_800_MV 0x17F +#define REG_PRE_3P5_DB_800_MV 0x181 +#define REG_SSC_CTRL0 0x188 +#define REG_SSC_CTRL1 0x189 +#define REG_SSC_CTRL2 0x18A #define RBR DP_LINK_BW_1_62 #define HBR DP_LINK_BW_2_7 @@ -489,7 +489,7 @@ static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = { }; static const struct regmap_range it6505_bridge_volatile_ranges[] = { - { .range_min = 0, .range_max = 0xFF }, + { .range_min = 0, .range_max = 0x1FF }, }; static const struct regmap_access_table it6505_bridge_volatile_table = { @@ -497,11 +497,27 @@ static const struct regmap_access_table it6505_bridge_volatile_table = { .n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges), }; +static const struct regmap_range_cfg it6505_regmap_banks[] = { + { + .name = "it6505", + .range_min = 0x00, + .range_max = 0x1FF, + .selector_reg = REG_BANK_SEL, + .selector_mask = 0x1, + .selector_shift = 0, + .window_start = 0x00, + .window_len 
= 0x100, + }, +}; + static const struct regmap_config it6505_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_table = &it6505_bridge_volatile_table, .cache_type = REGCACHE_NONE, + .ranges = it6505_regmap_banks, + .num_ranges = ARRAY_SIZE(it6505_regmap_banks), + .max_register = 0x1FF, }; static int it6505_read(struct it6505 *it6505, unsigned int reg_addr) @@ -1267,7 +1283,6 @@ static void it6505_init(struct it6505 *it6505) it6505_write(it6505, REG_TIME_STMP_CTRL, EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP); it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00); - it6505_write(it6505, REG_BANK_SEL, 0x01); it6505_write(it6505, REG_DRV_0_DB_800_MV, afe_setting_table[it6505->afe_setting][0]); it6505_write(it6505, REG_PRE_0_DB_800_MV, @@ -1277,7 +1292,6 @@ static void it6505_init(struct it6505 *it6505) it6505_write(it6505, REG_SSC_CTRL0, 0x9E); it6505_write(it6505, REG_SSC_CTRL1, 0x1C); it6505_write(it6505, REG_SSC_CTRL2, 0x42); - it6505_write(it6505, REG_BANK_SEL, 0x00); } static void it6505_video_disable(struct it6505 *it6505) @@ -1506,11 +1520,9 @@ static void it6505_setup_ssc(struct it6505 *it6505) it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5, it6505->enable_ssc ? SPREAD_AMP_5 : 0x00); if (it6505->enable_ssc) { - it6505_write(it6505, REG_BANK_SEL, 0x01); it6505_write(it6505, REG_SSC_CTRL0, 0x9E); it6505_write(it6505, REG_SSC_CTRL1, 0x1C); it6505_write(it6505, REG_SSC_CTRL2, 0x42); - it6505_write(it6505, REG_BANK_SEL, 0x00); it6505_write(it6505, REG_SP_CTRL0, 0x07); it6505_write(it6505, REG_IP_CTRL1, 0x29); it6505_write(it6505, REG_IP_CTRL2, 0x03); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 2019a8167d69..b40baced1331 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -676,8 +676,8 @@ static int lt8912_parse_dt(struct lt8912 *lt) lt->hdmi_port = of_drm_find_bridge(port_node); if (!lt->hdmi_port) { - dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__); - ret = -ENODEV; + ret = -EPROBE_DEFER; + dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__); goto err_free_host_node; } diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 39e7004de720..67368f23d4aa 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -215,13 +215,11 @@ static int lvds_codec_probe(struct platform_device *pdev) return 0; } -static int lvds_codec_remove(struct platform_device *pdev) +static void lvds_codec_remove(struct platform_device *pdev) { struct lvds_codec *lvds_codec = platform_get_drvdata(pdev); drm_bridge_remove(&lvds_codec->bridge); - - return 0; } static const struct of_device_id lvds_codec_match[] = { @@ -243,7 +241,7 @@ MODULE_DEVICE_TABLE(of, lvds_codec_match); static struct platform_driver lvds_codec_driver = { .probe = lvds_codec_probe, - .remove = lvds_codec_remove, + .remove_new = lvds_codec_remove, .driver = { .name = "lvds-codec", .of_match_table = lvds_codec_match, diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 6dc2a4e191d7..4a5f5c4f5dcc 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -1199,7 +1199,7 @@ static int nwl_dsi_probe(struct platform_device *pdev) return 0; } -static int nwl_dsi_remove(struct platform_device *pdev) +static void nwl_dsi_remove(struct platform_device *pdev) { struct nwl_dsi *dsi = platform_get_drvdata(pdev); @@ -1207,12 +1207,11 @@ static int nwl_dsi_remove(struct 
platform_device *pdev) mipi_dsi_host_unregister(&dsi->dsi_host); drm_bridge_remove(&dsi->bridge); pm_runtime_disable(&pdev->dev); - return 0; } static struct platform_driver nwl_dsi_driver = { .probe = nwl_dsi_probe, - .remove = nwl_dsi_remove, + .remove_new = nwl_dsi_remove, .driver = { .of_match_table = nwl_dsi_dt_ids, .name = DRV_NAME, diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 530ee6a19e7e..efa80e309b98 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -496,7 +496,7 @@ static int ps8622_probe(struct i2c_client *client) ps8622->lane_count = ps8622->max_lane_count; } - if (!of_find_property(dev->of_node, "use-external-pwm", NULL)) { + if (!of_property_read_bool(dev->of_node, "use-external-pwm")) { ps8622->bl = backlight_device_register("ps8622-backlight", dev, ps8622, &ps8622_backlight_ops, NULL); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 4b361d7d5e44..b823e55650b1 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -105,6 +105,7 @@ struct ps8640 { struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; struct device_link *link; + struct edid *edid; bool pre_enabled; bool need_post_hpd_delay; }; @@ -542,34 +543,44 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; bool poweroff = !ps_bridge->pre_enabled; - struct edid *edid; - /* - * When we end calling get_edid() triggered by an ioctl, i.e - * - * drm_mode_getconnector (ioctl) - * -> drm_helper_probe_single_connector_modes - * -> drm_bridge_connector_get_modes - * -> ps8640_bridge_get_edid - * - * We need to make sure that what we need is enabled before reading - * EDID, for this chip, we need to do a full poweron, otherwise it will - * fail. - */ - drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state); + if (!ps_bridge->edid) { + /* + * When we end calling get_edid() triggered by an ioctl, i.e + * + * drm_mode_getconnector (ioctl) + * -> drm_helper_probe_single_connector_modes + * -> drm_bridge_connector_get_modes + * -> ps8640_bridge_get_edid + * + * We need to make sure that what we need is enabled before + * reading EDID, for this chip, we need to do a full poweron, + * otherwise it will fail. + */ + if (poweroff) + drm_atomic_bridge_chain_pre_enable(bridge, + connector->state->state); - edid = drm_get_edid(connector, - ps_bridge->page[PAGE0_DP_CNTL]->adapter); + ps_bridge->edid = drm_get_edid(connector, + ps_bridge->page[PAGE0_DP_CNTL]->adapter); - /* - * If we call the get_edid() function without having enabled the chip - * before, return the chip to its original power state. - */ - if (poweroff) - drm_atomic_bridge_chain_post_disable(bridge, connector->state->state); + /* + * If we call the get_edid() function without having enabled the + * chip before, return the chip to its original power state. 
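Note: the ps8640 hunk here reads the EDID from the panel only once, caches it in the driver structure, and hands every caller a duplicate; the cached buffer is then released in the new remove() callback. A minimal sketch of that caching shape, assuming hypothetical example_* names (power-sequencing around the read is omitted):

#include <linux/i2c.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

struct example_bridge {
	struct edid *edid;
	struct i2c_adapter *ddc;
};

static struct edid *example_get_edid(struct example_bridge *ctx,
				     struct drm_connector *connector)
{
	/* Read and cache the EDID on first use only. */
	if (!ctx->edid)
		ctx->edid = drm_get_edid(connector, ctx->ddc);

	if (!ctx->edid)
		return NULL;

	/* Callers own the duplicate; the cached copy lives until remove(). */
	return drm_edid_duplicate(ctx->edid);
}

Caching avoids repeating the full power-on/power-off cycle the chip needs for every EDID read triggered from userspace.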
+ */ + if (poweroff) + drm_atomic_bridge_chain_post_disable(bridge, + connector->state->state); + } + + if (!ps_bridge->edid) { + dev_err(dev, "Failed to get EDID\n"); + return NULL; + } - return edid; + return drm_edid_duplicate(ps_bridge->edid); } static void ps8640_runtime_disable(void *data) @@ -766,6 +777,13 @@ static int ps8640_probe(struct i2c_client *client) return ret; } +static void ps8640_remove(struct i2c_client *client) +{ + struct ps8640 *ps_bridge = i2c_get_clientdata(client); + + kfree(ps_bridge->edid); +} + static const struct of_device_id ps8640_match[] = { { .compatible = "parade,ps8640" }, { } @@ -774,6 +792,7 @@ MODULE_DEVICE_TABLE(of, ps8640_match); static struct i2c_driver ps8640_driver = { .probe_new = ps8640_probe, + .remove = ps8640_remove, .driver = { .name = "ps8640", .of_match_table = ps8640_match, diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c new file mode 100644 index 000000000000..e0a402a85787 --- /dev/null +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -0,0 +1,1967 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Samsung MIPI DSIM bridge driver. + * + * Copyright (C) 2021 Amarula Solutions(India) + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * Author: Jagan Teki <jagan@amarulasolutions.com> + * + * Based on exynos_drm_dsi from + * Tomasz Figa <t.figa@samsung.com> + */ + +#include <asm/unaligned.h> + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/media-bus-format.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> + +#include <video/mipi_display.h> + +#include <drm/bridge/samsung-dsim.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +/* returns true iff both arguments logically differs */ +#define NEQV(a, b) (!(a) ^ !(b)) + +/* DSIM_STATUS */ +#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) +#define DSIM_STOP_STATE_CLK BIT(8) +#define DSIM_TX_READY_HS_CLK BIT(10) +#define DSIM_PLL_STABLE BIT(31) + +/* DSIM_SWRST */ +#define DSIM_FUNCRST BIT(16) +#define DSIM_SWRST BIT(0) + +/* DSIM_TIMEOUT */ +#define DSIM_LPDR_TIMEOUT(x) ((x) << 0) +#define DSIM_BTA_TIMEOUT(x) ((x) << 16) + +/* DSIM_CLKCTRL */ +#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0) +#define DSIM_ESC_PRESCALER_MASK (0xffff << 0) +#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19) +#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20) +#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20) +#define DSIM_BYTE_CLKEN BIT(24) +#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25) +#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25) +#define DSIM_PLL_BYPASS BIT(27) +#define DSIM_ESC_CLKEN BIT(28) +#define DSIM_TX_REQUEST_HSCLK BIT(31) + +/* DSIM_CONFIG */ +#define DSIM_LANE_EN_CLK BIT(0) +#define DSIM_LANE_EN(x) (((x) & 0xf) << 1) +#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5) +#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8) +#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12) +#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12) +#define DSIM_SUB_VC (((x) & 0x3) << 16) +#define DSIM_MAIN_VC (((x) & 0x3) << 18) +#define DSIM_HSA_DISABLE_MODE BIT(20) +#define DSIM_HBP_DISABLE_MODE BIT(21) +#define DSIM_HFP_DISABLE_MODE BIT(22) +/* + * The i.MX 8M Mini Applications Processor Reference Manual, + * Rev. 3, 11/2020 Page 4091 + * The i.MX 8M Nano Applications Processor Reference Manual, + * Rev. 
2, 07/2022 Page 3058 + * The i.MX 8M Plus Applications Processor Reference Manual, + * Rev. 1, 06/2021 Page 5436 + * all claims this bit is 'HseDisableMode' with the definition + * 0 = Disables transfer + * 1 = Enables transfer + * + * This clearly states that HSE is not a disabled bit. + * + * The naming convention follows as per the manual and the + * driver logic is based on the MIPI_DSI_MODE_VIDEO_HSE flag. + */ +#define DSIM_HSE_DISABLE_MODE BIT(23) +#define DSIM_AUTO_MODE BIT(24) +#define DSIM_VIDEO_MODE BIT(25) +#define DSIM_BURST_MODE BIT(26) +#define DSIM_SYNC_INFORM BIT(27) +#define DSIM_EOT_DISABLE BIT(28) +#define DSIM_MFLUSH_VS BIT(29) +/* This flag is valid only for exynos3250/3472/5260/5430 */ +#define DSIM_CLKLANE_STOP BIT(30) + +/* DSIM_ESCMODE */ +#define DSIM_TX_TRIGGER_RST BIT(4) +#define DSIM_TX_LPDT_LP BIT(6) +#define DSIM_CMD_LPDT_LP BIT(7) +#define DSIM_FORCE_BTA BIT(16) +#define DSIM_FORCE_STOP_STATE BIT(20) +#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21) +#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21) + +/* DSIM_MDRESOL */ +#define DSIM_MAIN_STAND_BY BIT(31) +#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16) +#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0) + +/* DSIM_MVPORCH */ +#define DSIM_CMD_ALLOW(x) ((x) << 28) +#define DSIM_STABLE_VFP(x) ((x) << 16) +#define DSIM_MAIN_VBP(x) ((x) << 0) +#define DSIM_CMD_ALLOW_MASK (0xf << 28) +#define DSIM_STABLE_VFP_MASK (0x7ff << 16) +#define DSIM_MAIN_VBP_MASK (0x7ff << 0) + +/* DSIM_MHPORCH */ +#define DSIM_MAIN_HFP(x) ((x) << 16) +#define DSIM_MAIN_HBP(x) ((x) << 0) +#define DSIM_MAIN_HFP_MASK ((0xffff) << 16) +#define DSIM_MAIN_HBP_MASK ((0xffff) << 0) + +/* DSIM_MSYNC */ +#define DSIM_MAIN_VSA(x) ((x) << 22) +#define DSIM_MAIN_HSA(x) ((x) << 0) +#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22) +#define DSIM_MAIN_HSA_MASK ((0xffff) << 0) + +/* DSIM_SDRESOL */ +#define DSIM_SUB_STANDY(x) ((x) << 31) +#define DSIM_SUB_VRESOL(x) ((x) << 16) +#define DSIM_SUB_HRESOL(x) ((x) << 0) +#define DSIM_SUB_STANDY_MASK ((0x1) << 31) +#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16) +#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0) + +/* DSIM_INTSRC */ +#define DSIM_INT_PLL_STABLE BIT(31) +#define DSIM_INT_SW_RST_RELEASE BIT(30) +#define DSIM_INT_SFR_FIFO_EMPTY BIT(29) +#define DSIM_INT_SFR_HDR_FIFO_EMPTY BIT(28) +#define DSIM_INT_BTA BIT(25) +#define DSIM_INT_FRAME_DONE BIT(24) +#define DSIM_INT_RX_TIMEOUT BIT(21) +#define DSIM_INT_BTA_TIMEOUT BIT(20) +#define DSIM_INT_RX_DONE BIT(18) +#define DSIM_INT_RX_TE BIT(17) +#define DSIM_INT_RX_ACK BIT(16) +#define DSIM_INT_RX_ECC_ERR BIT(15) +#define DSIM_INT_RX_CRC_ERR BIT(14) + +/* DSIM_FIFOCTRL */ +#define DSIM_RX_DATA_FULL BIT(25) +#define DSIM_RX_DATA_EMPTY BIT(24) +#define DSIM_SFR_HEADER_FULL BIT(23) +#define DSIM_SFR_HEADER_EMPTY BIT(22) +#define DSIM_SFR_PAYLOAD_FULL BIT(21) +#define DSIM_SFR_PAYLOAD_EMPTY BIT(20) +#define DSIM_I80_HEADER_FULL BIT(19) +#define DSIM_I80_HEADER_EMPTY BIT(18) +#define DSIM_I80_PAYLOAD_FULL BIT(17) +#define DSIM_I80_PAYLOAD_EMPTY BIT(16) +#define DSIM_SD_HEADER_FULL BIT(15) +#define DSIM_SD_HEADER_EMPTY BIT(14) +#define DSIM_SD_PAYLOAD_FULL BIT(13) +#define DSIM_SD_PAYLOAD_EMPTY BIT(12) +#define DSIM_MD_HEADER_FULL BIT(11) +#define DSIM_MD_HEADER_EMPTY BIT(10) +#define DSIM_MD_PAYLOAD_FULL BIT(9) +#define DSIM_MD_PAYLOAD_EMPTY BIT(8) +#define DSIM_RX_FIFO BIT(4) +#define DSIM_SFR_FIFO BIT(3) +#define DSIM_I80_FIFO BIT(2) +#define DSIM_SD_FIFO BIT(1) +#define DSIM_MD_FIFO BIT(0) + +/* DSIM_PHYACCHR */ 
+#define DSIM_AFC_EN BIT(14) +#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5) + +/* DSIM_PLLCTRL */ +#define DSIM_FREQ_BAND(x) ((x) << 24) +#define DSIM_PLL_EN BIT(23) +#define DSIM_PLL_P(x, offset) ((x) << (offset)) +#define DSIM_PLL_M(x) ((x) << 4) +#define DSIM_PLL_S(x) ((x) << 1) + +/* DSIM_PHYCTRL */ +#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0) +#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP BIT(30) +#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP BIT(14) + +/* DSIM_PHYTIMING */ +#define DSIM_PHYTIMING_LPX(x) ((x) << 8) +#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0) + +/* DSIM_PHYTIMING1 */ +#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24) +#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16) +#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8) +#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0) + +/* DSIM_PHYTIMING2 */ +#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16) +#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8) +#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0) + +#define DSI_MAX_BUS_WIDTH 4 +#define DSI_NUM_VIRTUAL_CHANNELS 4 +#define DSI_TX_FIFO_SIZE 2048 +#define DSI_RX_FIFO_SIZE 256 +#define DSI_XFER_TIMEOUT_MS 100 +#define DSI_RX_FIFO_EMPTY 0x30800002 + +#define OLD_SCLK_MIPI_CLK_NAME "pll_clk" + +static const char *const clk_names[5] = { + "bus_clk", + "sclk_mipi", + "phyclk_mipidphy0_bitclkdiv8", + "phyclk_mipidphy0_rxclkesc0", + "sclk_rgb_vclk_to_dsim0" +}; + +enum samsung_dsim_transfer_type { + EXYNOS_DSI_TX, + EXYNOS_DSI_RX, +}; + +enum reg_idx { + DSIM_STATUS_REG, /* Status register */ + DSIM_SWRST_REG, /* Software reset register */ + DSIM_CLKCTRL_REG, /* Clock control register */ + DSIM_TIMEOUT_REG, /* Time out register */ + DSIM_CONFIG_REG, /* Configuration register */ + DSIM_ESCMODE_REG, /* Escape mode register */ + DSIM_MDRESOL_REG, + DSIM_MVPORCH_REG, /* Main display Vporch register */ + DSIM_MHPORCH_REG, /* Main display Hporch register */ + DSIM_MSYNC_REG, /* Main display sync area register */ + DSIM_INTSRC_REG, /* Interrupt source register */ + DSIM_INTMSK_REG, /* Interrupt mask register */ + DSIM_PKTHDR_REG, /* Packet Header FIFO register */ + DSIM_PAYLOAD_REG, /* Payload FIFO register */ + DSIM_RXFIFO_REG, /* Read FIFO register */ + DSIM_FIFOCTRL_REG, /* FIFO status and control register */ + DSIM_PLLCTRL_REG, /* PLL control register */ + DSIM_PHYCTRL_REG, + DSIM_PHYTIMING_REG, + DSIM_PHYTIMING1_REG, + DSIM_PHYTIMING2_REG, + NUM_REGS +}; + +static const unsigned int exynos_reg_ofs[] = { + [DSIM_STATUS_REG] = 0x00, + [DSIM_SWRST_REG] = 0x04, + [DSIM_CLKCTRL_REG] = 0x08, + [DSIM_TIMEOUT_REG] = 0x0c, + [DSIM_CONFIG_REG] = 0x10, + [DSIM_ESCMODE_REG] = 0x14, + [DSIM_MDRESOL_REG] = 0x18, + [DSIM_MVPORCH_REG] = 0x1c, + [DSIM_MHPORCH_REG] = 0x20, + [DSIM_MSYNC_REG] = 0x24, + [DSIM_INTSRC_REG] = 0x2c, + [DSIM_INTMSK_REG] = 0x30, + [DSIM_PKTHDR_REG] = 0x34, + [DSIM_PAYLOAD_REG] = 0x38, + [DSIM_RXFIFO_REG] = 0x3c, + [DSIM_FIFOCTRL_REG] = 0x44, + [DSIM_PLLCTRL_REG] = 0x4c, + [DSIM_PHYCTRL_REG] = 0x5c, + [DSIM_PHYTIMING_REG] = 0x64, + [DSIM_PHYTIMING1_REG] = 0x68, + [DSIM_PHYTIMING2_REG] = 0x6c, +}; + +static const unsigned int exynos5433_reg_ofs[] = { + [DSIM_STATUS_REG] = 0x04, + [DSIM_SWRST_REG] = 0x0C, + [DSIM_CLKCTRL_REG] = 0x10, + [DSIM_TIMEOUT_REG] = 0x14, + [DSIM_CONFIG_REG] = 0x18, + [DSIM_ESCMODE_REG] = 0x1C, + [DSIM_MDRESOL_REG] = 0x20, + [DSIM_MVPORCH_REG] = 0x24, + [DSIM_MHPORCH_REG] = 0x28, + [DSIM_MSYNC_REG] = 0x2C, + [DSIM_INTSRC_REG] = 0x34, + [DSIM_INTMSK_REG] = 0x38, + [DSIM_PKTHDR_REG] = 0x3C, + [DSIM_PAYLOAD_REG] = 0x40, + [DSIM_RXFIFO_REG] = 0x44, + [DSIM_FIFOCTRL_REG] = 0x4C, 
+ [DSIM_PLLCTRL_REG] = 0x94, + [DSIM_PHYCTRL_REG] = 0xA4, + [DSIM_PHYTIMING_REG] = 0xB4, + [DSIM_PHYTIMING1_REG] = 0xB8, + [DSIM_PHYTIMING2_REG] = 0xBC, +}; + +enum reg_value_idx { + RESET_TYPE, + PLL_TIMER, + STOP_STATE_CNT, + PHYCTRL_ULPS_EXIT, + PHYCTRL_VREG_LP, + PHYCTRL_SLEW_UP, + PHYTIMING_LPX, + PHYTIMING_HS_EXIT, + PHYTIMING_CLK_PREPARE, + PHYTIMING_CLK_ZERO, + PHYTIMING_CLK_POST, + PHYTIMING_CLK_TRAIL, + PHYTIMING_HS_PREPARE, + PHYTIMING_HS_ZERO, + PHYTIMING_HS_TRAIL +}; + +static const unsigned int reg_values[] = { + [RESET_TYPE] = DSIM_SWRST, + [PLL_TIMER] = 500, + [STOP_STATE_CNT] = 0xf, + [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af), + [PHYCTRL_VREG_LP] = 0, + [PHYCTRL_SLEW_UP] = 0, + [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06), + [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b), + [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07), + [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27), + [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d), + [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08), + [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09), + [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d), + [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), +}; + +static const unsigned int exynos5422_reg_values[] = { + [RESET_TYPE] = DSIM_SWRST, + [PLL_TIMER] = 500, + [STOP_STATE_CNT] = 0xf, + [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf), + [PHYCTRL_VREG_LP] = 0, + [PHYCTRL_SLEW_UP] = 0, + [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08), + [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d), + [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), + [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30), + [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), + [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a), + [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c), + [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11), + [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d), +}; + +static const unsigned int exynos5433_reg_values[] = { + [RESET_TYPE] = DSIM_FUNCRST, + [PLL_TIMER] = 22200, + [STOP_STATE_CNT] = 0xa, + [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190), + [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP, + [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP, + [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07), + [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c), + [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), + [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d), + [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), + [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09), + [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b), + [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10), + [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c), +}; + +static const unsigned int imx8mm_dsim_reg_values[] = { + [RESET_TYPE] = DSIM_SWRST, + [PLL_TIMER] = 500, + [STOP_STATE_CNT] = 0xf, + [PHYCTRL_ULPS_EXIT] = 0, + [PHYCTRL_VREG_LP] = 0, + [PHYCTRL_SLEW_UP] = 0, + [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06), + [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b), + [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07), + [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x26), + [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d), + [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08), + [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x08), + [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d), + [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), +}; + +static const struct 
samsung_dsim_driver_data exynos3_dsi_driver_data = { + .reg_ofs = exynos_reg_ofs, + .plltmr_reg = 0x50, + .has_freqband = 1, + .has_clklane_stop = 1, + .num_clks = 2, + .max_freq = 1000, + .wait_for_reset = 1, + .num_bits_resol = 11, + .pll_p_offset = 13, + .reg_values = reg_values, +}; + +static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { + .reg_ofs = exynos_reg_ofs, + .plltmr_reg = 0x50, + .has_freqband = 1, + .has_clklane_stop = 1, + .num_clks = 2, + .max_freq = 1000, + .wait_for_reset = 1, + .num_bits_resol = 11, + .pll_p_offset = 13, + .reg_values = reg_values, +}; + +static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = { + .reg_ofs = exynos_reg_ofs, + .plltmr_reg = 0x58, + .num_clks = 2, + .max_freq = 1000, + .wait_for_reset = 1, + .num_bits_resol = 11, + .pll_p_offset = 13, + .reg_values = reg_values, +}; + +static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = { + .reg_ofs = exynos5433_reg_ofs, + .plltmr_reg = 0xa0, + .has_clklane_stop = 1, + .num_clks = 5, + .max_freq = 1500, + .wait_for_reset = 0, + .num_bits_resol = 12, + .pll_p_offset = 13, + .reg_values = exynos5433_reg_values, +}; + +static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = { + .reg_ofs = exynos5433_reg_ofs, + .plltmr_reg = 0xa0, + .has_clklane_stop = 1, + .num_clks = 2, + .max_freq = 1500, + .wait_for_reset = 1, + .num_bits_resol = 12, + .pll_p_offset = 13, + .reg_values = exynos5422_reg_values, +}; + +static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = { + .reg_ofs = exynos5433_reg_ofs, + .plltmr_reg = 0xa0, + .has_clklane_stop = 1, + .num_clks = 2, + .max_freq = 2100, + .wait_for_reset = 0, + .num_bits_resol = 12, + /* + * Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus + * downstream driver - drivers/gpu/drm/bridge/sec-dsim.c + */ + .pll_p_offset = 14, + .reg_values = imx8mm_dsim_reg_values, +}; + +static const struct samsung_dsim_driver_data * +samsung_dsim_types[DSIM_TYPE_COUNT] = { + [DSIM_TYPE_EXYNOS3250] = &exynos3_dsi_driver_data, + [DSIM_TYPE_EXYNOS4210] = &exynos4_dsi_driver_data, + [DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data, + [DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data, + [DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data, + [DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data, + [DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data, +}; + +static inline struct samsung_dsim *host_to_dsi(struct mipi_dsi_host *h) +{ + return container_of(h, struct samsung_dsim, dsi_host); +} + +static inline struct samsung_dsim *bridge_to_dsi(struct drm_bridge *b) +{ + return container_of(b, struct samsung_dsim, bridge); +} + +static inline void samsung_dsim_write(struct samsung_dsim *dsi, + enum reg_idx idx, u32 val) +{ + writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]); +} + +static inline u32 samsung_dsim_read(struct samsung_dsim *dsi, enum reg_idx idx) +{ + return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]); +} + +static void samsung_dsim_wait_for_reset(struct samsung_dsim *dsi) +{ + if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) + return; + + dev_err(dsi->dev, "timeout waiting for reset\n"); +} + +static void samsung_dsim_reset(struct samsung_dsim *dsi) +{ + u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE]; + + reinit_completion(&dsi->completed); + samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val); +} + +#ifndef MHZ +#define MHZ (1000 * 1000) +#endif + +static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, + unsigned 
long fin, + unsigned long fout, + u8 *p, u16 *m, u8 *s) +{ + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + unsigned long best_freq = 0; + u32 min_delta = 0xffffffff; + u8 p_min, p_max; + u8 _p, best_p; + u16 _m, best_m; + u8 _s, best_s; + + p_min = DIV_ROUND_UP(fin, (12 * MHZ)); + p_max = fin / (6 * MHZ); + + for (_p = p_min; _p <= p_max; ++_p) { + for (_s = 0; _s <= 5; ++_s) { + u64 tmp; + u32 delta; + + tmp = (u64)fout * (_p << _s); + do_div(tmp, fin); + _m = tmp; + if (_m < 41 || _m > 125) + continue; + + tmp = (u64)_m * fin; + do_div(tmp, _p); + if (tmp < 500 * MHZ || + tmp > driver_data->max_freq * MHZ) + continue; + + tmp = (u64)_m * fin; + do_div(tmp, _p << _s); + + delta = abs(fout - tmp); + if (delta < min_delta) { + best_p = _p; + best_m = _m; + best_s = _s; + min_delta = delta; + best_freq = tmp; + } + } + } + + if (best_freq) { + *p = best_p; + *m = best_m; + *s = best_s; + } + + return best_freq; +} + +static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi, + unsigned long freq) +{ + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + unsigned long fin, fout; + int timeout; + u8 p, s; + u16 m; + u32 reg; + + fin = dsi->pll_clk_rate; + fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s); + if (!fout) { + dev_err(dsi->dev, + "failed to find PLL PMS for requested frequency\n"); + return 0; + } + dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); + + writel(driver_data->reg_values[PLL_TIMER], + dsi->reg_base + driver_data->plltmr_reg); + + reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) | + DSIM_PLL_M(m) | DSIM_PLL_S(s); + + if (driver_data->has_freqband) { + static const unsigned long freq_bands[] = { + 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, + 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, + 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ, + 770 * MHZ, 870 * MHZ, 950 * MHZ, + }; + int band; + + for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) + if (fout < freq_bands[band]) + break; + + dev_dbg(dsi->dev, "band %d\n", band); + + reg |= DSIM_FREQ_BAND(band); + } + + samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg); + + timeout = 1000; + do { + if (timeout-- == 0) { + dev_err(dsi->dev, "PLL failed to stabilize\n"); + return 0; + } + reg = samsung_dsim_read(dsi, DSIM_STATUS_REG); + } while ((reg & DSIM_PLL_STABLE) == 0); + + return fout; +} + +static int samsung_dsim_enable_clock(struct samsung_dsim *dsi) +{ + unsigned long hs_clk, byte_clk, esc_clk; + unsigned long esc_div; + u32 reg; + + hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate); + if (!hs_clk) { + dev_err(dsi->dev, "failed to configure DSI PLL\n"); + return -EFAULT; + } + + byte_clk = hs_clk / 8; + esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate); + esc_clk = byte_clk / esc_div; + + if (esc_clk > 20 * MHZ) { + ++esc_div; + esc_clk = byte_clk / esc_div; + } + + dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n", + hs_clk, byte_clk, esc_clk); + + reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG); + reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK + | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS + | DSIM_BYTE_CLK_SRC_MASK); + reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN + | DSIM_ESC_PRESCALER(esc_div) + | DSIM_LANE_ESC_CLK_EN_CLK + | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1) + | DSIM_BYTE_CLK_SRC(0) + | DSIM_TX_REQUEST_HSCLK; + samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg); + + return 0; +} + +static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi) +{ + const struct 
samsung_dsim_driver_data *driver_data = dsi->driver_data; + const unsigned int *reg_values = driver_data->reg_values; + u32 reg; + + if (driver_data->has_freqband) + return; + + /* B D-PHY: D-PHY Master & Slave Analog Block control */ + reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] | + reg_values[PHYCTRL_SLEW_UP]; + samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg); + + /* + * T LPX: Transmitted length of any Low-Power state period + * T HS-EXIT: Time that the transmitter drives LP-11 following a HS + * burst + */ + reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT]; + samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg); + + /* + * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00 + * Line state immediately before the HS-0 Line state starting the + * HS transmission + * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to + * transmitting the Clock. + * T CLK_POST: Time that the transmitter continues to send HS clock + * after the last associated Data Lane has transitioned to LP Mode + * Interval is defined as the period from the end of T HS-TRAIL to + * the beginning of T CLK-TRAIL + * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after + * the last payload clock bit of a HS transmission burst + */ + reg = reg_values[PHYTIMING_CLK_PREPARE] | + reg_values[PHYTIMING_CLK_ZERO] | + reg_values[PHYTIMING_CLK_POST] | + reg_values[PHYTIMING_CLK_TRAIL]; + + samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg); + + /* + * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00 + * Line state immediately before the HS-0 Line state starting the + * HS transmission + * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to + * transmitting the Sync sequence. + * T HS-TRAIL: Time that the transmitter drives the flipped differential + * state after last payload data bit of a HS transmission burst + */ + reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] | + reg_values[PHYTIMING_HS_TRAIL]; + samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg); +} + +static void samsung_dsim_disable_clock(struct samsung_dsim *dsi) +{ + u32 reg; + + reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG); + reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK + | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN); + samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg); + + reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG); + reg &= ~DSIM_PLL_EN; + samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg); +} + +static void samsung_dsim_enable_lane(struct samsung_dsim *dsi, u32 lane) +{ + u32 reg = samsung_dsim_read(dsi, DSIM_CONFIG_REG); + + reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK | + DSIM_LANE_EN(lane)); + samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg); +} + +static int samsung_dsim_init_link(struct samsung_dsim *dsi) +{ + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + int timeout; + u32 reg; + u32 lanes_mask; + + /* Initialize FIFO pointers */ + reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG); + reg &= ~0x1f; + samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg); + + usleep_range(9000, 11000); + + reg |= 0x1f; + samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg); + usleep_range(9000, 11000); + + /* DSI configuration */ + reg = 0; + + /* + * The first bit of mode_flags specifies display configuration. + * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video + * mode, otherwise it will support command mode. 
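Note: samsung_dsim_init_link() translates the attached peer's mode_flags into DSIM_CONFIG bits. Those flags are normally filled in by the downstream panel or bridge when it attaches to the DSI host; a hedged sketch of what such a peer typically sets (the particular flag combination is illustrative only, not taken from this patch):

#include <drm/drm_mipi_dsi.h>

static void example_configure_dsi_device(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;
}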
+ */ + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + reg |= DSIM_VIDEO_MODE; + + /* + * The user manual describes that following bits are ignored in + * command mode. + */ + if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) + reg |= DSIM_MFLUSH_VS; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + reg |= DSIM_SYNC_INFORM; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + reg |= DSIM_BURST_MODE; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT) + reg |= DSIM_AUTO_MODE; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE) + reg |= DSIM_HSE_DISABLE_MODE; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP) + reg |= DSIM_HFP_DISABLE_MODE; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP) + reg |= DSIM_HBP_DISABLE_MODE; + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA) + reg |= DSIM_HSA_DISABLE_MODE; + } + + if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) + reg |= DSIM_EOT_DISABLE; + + switch (dsi->format) { + case MIPI_DSI_FMT_RGB888: + reg |= DSIM_MAIN_PIX_FORMAT_RGB888; + break; + case MIPI_DSI_FMT_RGB666: + reg |= DSIM_MAIN_PIX_FORMAT_RGB666; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P; + break; + case MIPI_DSI_FMT_RGB565: + reg |= DSIM_MAIN_PIX_FORMAT_RGB565; + break; + default: + dev_err(dsi->dev, "invalid pixel format\n"); + return -EINVAL; + } + + /* + * Use non-continuous clock mode if the periparal wants and + * host controller supports + * + * In non-continous clock mode, host controller will turn off + * the HS clock between high-speed transmissions to reduce + * power consumption. + */ + if (driver_data->has_clklane_stop && + dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + reg |= DSIM_CLKLANE_STOP; + samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg); + + lanes_mask = BIT(dsi->lanes) - 1; + samsung_dsim_enable_lane(dsi, lanes_mask); + + /* Check clock and data lane state are stop state */ + timeout = 100; + do { + if (timeout-- == 0) { + dev_err(dsi->dev, "waiting for bus lanes timed out\n"); + return -EFAULT; + } + + reg = samsung_dsim_read(dsi, DSIM_STATUS_REG); + if ((reg & DSIM_STOP_STATE_DAT(lanes_mask)) + != DSIM_STOP_STATE_DAT(lanes_mask)) + continue; + } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK))); + + reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); + reg &= ~DSIM_STOP_STATE_CNT_MASK; + reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]); + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg); + + reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); + samsung_dsim_write(dsi, DSIM_TIMEOUT_REG, reg); + + return 0; +} + +static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi) +{ + struct drm_display_mode *m = &dsi->mode; + unsigned int num_bits_resol = dsi->driver_data->num_bits_resol; + u32 reg; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { + reg = DSIM_CMD_ALLOW(0xf) + | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay) + | DSIM_MAIN_VBP(m->vtotal - m->vsync_end); + samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg); + + reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay) + | DSIM_MAIN_HBP(m->htotal - m->hsync_end); + samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg); + + reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) + | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start); + samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg); + } + reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | + DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol); + + samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg); + + dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay); +} + +static void 
samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable) +{ + u32 reg; + + reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG); + if (enable) + reg |= DSIM_MAIN_STAND_BY; + else + reg &= ~DSIM_MAIN_STAND_BY; + samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg); +} + +static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi) +{ + int timeout = 2000; + + do { + u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG); + + if (!(reg & DSIM_SFR_HEADER_FULL)) + return 0; + + if (!cond_resched()) + usleep_range(950, 1050); + } while (--timeout); + + return -ETIMEDOUT; +} + +static void samsung_dsim_set_cmd_lpm(struct samsung_dsim *dsi, bool lpm) +{ + u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); + + if (lpm) + v |= DSIM_CMD_LPDT_LP; + else + v &= ~DSIM_CMD_LPDT_LP; + + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v); +} + +static void samsung_dsim_force_bta(struct samsung_dsim *dsi) +{ + u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); + + v |= DSIM_FORCE_BTA; + samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v); +} + +static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi, + struct samsung_dsim_transfer *xfer) +{ + struct device *dev = dsi->dev; + struct mipi_dsi_packet *pkt = &xfer->packet; + const u8 *payload = pkt->payload + xfer->tx_done; + u16 length = pkt->payload_length - xfer->tx_done; + bool first = !xfer->tx_done; + u32 reg; + + dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", + xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); + + if (length > DSI_TX_FIFO_SIZE) + length = DSI_TX_FIFO_SIZE; + + xfer->tx_done += length; + + /* Send payload */ + while (length >= 4) { + reg = get_unaligned_le32(payload); + samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg); + payload += 4; + length -= 4; + } + + reg = 0; + switch (length) { + case 3: + reg |= payload[2] << 16; + fallthrough; + case 2: + reg |= payload[1] << 8; + fallthrough; + case 1: + reg |= payload[0]; + samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg); + break; + } + + /* Send packet header */ + if (!first) + return; + + reg = get_unaligned_le32(pkt->header); + if (samsung_dsim_wait_for_hdr_fifo(dsi)) { + dev_err(dev, "waiting for header FIFO timed out\n"); + return; + } + + if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM, + dsi->state & DSIM_STATE_CMD_LPM)) { + samsung_dsim_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM); + dsi->state ^= DSIM_STATE_CMD_LPM; + } + + samsung_dsim_write(dsi, DSIM_PKTHDR_REG, reg); + + if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) + samsung_dsim_force_bta(dsi); +} + +static void samsung_dsim_read_from_fifo(struct samsung_dsim *dsi, + struct samsung_dsim_transfer *xfer) +{ + u8 *payload = xfer->rx_payload + xfer->rx_done; + bool first = !xfer->rx_done; + struct device *dev = dsi->dev; + u16 length; + u32 reg; + + if (first) { + reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); + + switch (reg & 0x3f) { + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: + if (xfer->rx_len >= 2) { + payload[1] = reg >> 16; + ++xfer->rx_done; + } + fallthrough; + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: + payload[0] = reg >> 8; + ++xfer->rx_done; + xfer->rx_len = xfer->rx_done; + xfer->result = 0; + goto clear_fifo; + case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: + dev_err(dev, "DSI Error Report: 0x%04x\n", (reg >> 8) & 0xffff); + xfer->result = 0; + goto clear_fifo; + } + + length = (reg >> 8) & 0xffff; + if (length > xfer->rx_len) { + dev_err(dev, + "response too long (%u > %u 
bytes), stripping\n", + xfer->rx_len, length); + length = xfer->rx_len; + } else if (length < xfer->rx_len) { + xfer->rx_len = length; + } + } + + length = xfer->rx_len - xfer->rx_done; + xfer->rx_done += length; + + /* Receive payload */ + while (length >= 4) { + reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); + payload[0] = (reg >> 0) & 0xff; + payload[1] = (reg >> 8) & 0xff; + payload[2] = (reg >> 16) & 0xff; + payload[3] = (reg >> 24) & 0xff; + payload += 4; + length -= 4; + } + + if (length) { + reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); + switch (length) { + case 3: + payload[2] = (reg >> 16) & 0xff; + fallthrough; + case 2: + payload[1] = (reg >> 8) & 0xff; + fallthrough; + case 1: + payload[0] = reg & 0xff; + } + } + + if (xfer->rx_done == xfer->rx_len) + xfer->result = 0; + +clear_fifo: + length = DSI_RX_FIFO_SIZE / 4; + do { + reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); + if (reg == DSI_RX_FIFO_EMPTY) + break; + } while (--length); +} + +static void samsung_dsim_transfer_start(struct samsung_dsim *dsi) +{ + unsigned long flags; + struct samsung_dsim_transfer *xfer; + bool start = false; + +again: + spin_lock_irqsave(&dsi->transfer_lock, flags); + + if (list_empty(&dsi->transfer_list)) { + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + return; + } + + xfer = list_first_entry(&dsi->transfer_list, + struct samsung_dsim_transfer, list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + + if (xfer->packet.payload_length && + xfer->tx_done == xfer->packet.payload_length) + /* waiting for RX */ + return; + + samsung_dsim_send_to_fifo(dsi, xfer); + + if (xfer->packet.payload_length || xfer->rx_len) + return; + + xfer->result = 0; + complete(&xfer->completed); + + spin_lock_irqsave(&dsi->transfer_lock, flags); + + list_del_init(&xfer->list); + start = !list_empty(&dsi->transfer_list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + + if (start) + goto again; +} + +static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi) +{ + struct samsung_dsim_transfer *xfer; + unsigned long flags; + bool start = true; + + spin_lock_irqsave(&dsi->transfer_lock, flags); + + if (list_empty(&dsi->transfer_list)) { + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + return false; + } + + xfer = list_first_entry(&dsi->transfer_list, + struct samsung_dsim_transfer, list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + + dev_dbg(dsi->dev, + "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", + xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, + xfer->rx_done); + + if (xfer->tx_done != xfer->packet.payload_length) + return true; + + if (xfer->rx_done != xfer->rx_len) + samsung_dsim_read_from_fifo(dsi, xfer); + + if (xfer->rx_done != xfer->rx_len) + return true; + + spin_lock_irqsave(&dsi->transfer_lock, flags); + + list_del_init(&xfer->list); + start = !list_empty(&dsi->transfer_list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + + if (!xfer->rx_len) + xfer->result = 0; + complete(&xfer->completed); + + return start; +} + +static void samsung_dsim_remove_transfer(struct samsung_dsim *dsi, + struct samsung_dsim_transfer *xfer) +{ + unsigned long flags; + bool start; + + spin_lock_irqsave(&dsi->transfer_lock, flags); + + if (!list_empty(&dsi->transfer_list) && + xfer == list_first_entry(&dsi->transfer_list, + struct samsung_dsim_transfer, list)) { + list_del_init(&xfer->list); + start = !list_empty(&dsi->transfer_list); + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + if (start) + samsung_dsim_transfer_start(dsi); 
+ return; + } + + list_del_init(&xfer->list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); +} + +static int samsung_dsim_transfer(struct samsung_dsim *dsi, + struct samsung_dsim_transfer *xfer) +{ + unsigned long flags; + bool stopped; + + xfer->tx_done = 0; + xfer->rx_done = 0; + xfer->result = -ETIMEDOUT; + init_completion(&xfer->completed); + + spin_lock_irqsave(&dsi->transfer_lock, flags); + + stopped = list_empty(&dsi->transfer_list); + list_add_tail(&xfer->list, &dsi->transfer_list); + + spin_unlock_irqrestore(&dsi->transfer_lock, flags); + + if (stopped) + samsung_dsim_transfer_start(dsi); + + wait_for_completion_timeout(&xfer->completed, + msecs_to_jiffies(DSI_XFER_TIMEOUT_MS)); + if (xfer->result == -ETIMEDOUT) { + struct mipi_dsi_packet *pkt = &xfer->packet; + + samsung_dsim_remove_transfer(dsi, xfer); + dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header, + (int)pkt->payload_length, pkt->payload); + return -ETIMEDOUT; + } + + /* Also covers hardware timeout condition */ + return xfer->result; +} + +static irqreturn_t samsung_dsim_irq(int irq, void *dev_id) +{ + struct samsung_dsim *dsi = dev_id; + u32 status; + + status = samsung_dsim_read(dsi, DSIM_INTSRC_REG); + if (!status) { + static unsigned long j; + + if (printk_timed_ratelimit(&j, 500)) + dev_warn(dsi->dev, "spurious interrupt\n"); + return IRQ_HANDLED; + } + samsung_dsim_write(dsi, DSIM_INTSRC_REG, status); + + if (status & DSIM_INT_SW_RST_RELEASE) { + unsigned long mask = ~(DSIM_INT_RX_DONE | + DSIM_INT_SFR_FIFO_EMPTY | + DSIM_INT_SFR_HDR_FIFO_EMPTY | + DSIM_INT_RX_ECC_ERR | + DSIM_INT_SW_RST_RELEASE); + samsung_dsim_write(dsi, DSIM_INTMSK_REG, mask); + complete(&dsi->completed); + return IRQ_HANDLED; + } + + if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY | + DSIM_INT_PLL_STABLE))) + return IRQ_HANDLED; + + if (samsung_dsim_transfer_finish(dsi)) + samsung_dsim_transfer_start(dsi); + + return IRQ_HANDLED; +} + +static void samsung_dsim_enable_irq(struct samsung_dsim *dsi) +{ + enable_irq(dsi->irq); + + if (dsi->te_gpio) + enable_irq(gpiod_to_irq(dsi->te_gpio)); +} + +static void samsung_dsim_disable_irq(struct samsung_dsim *dsi) +{ + if (dsi->te_gpio) + disable_irq(gpiod_to_irq(dsi->te_gpio)); + + disable_irq(dsi->irq); +} + +static int samsung_dsim_init(struct samsung_dsim *dsi) +{ + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + + if (dsi->state & DSIM_STATE_INITIALIZED) + return 0; + + samsung_dsim_reset(dsi); + samsung_dsim_enable_irq(dsi); + + if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST) + samsung_dsim_enable_lane(dsi, BIT(dsi->lanes) - 1); + + samsung_dsim_enable_clock(dsi); + if (driver_data->wait_for_reset) + samsung_dsim_wait_for_reset(dsi); + samsung_dsim_set_phy_ctrl(dsi); + samsung_dsim_init_link(dsi); + + dsi->state |= DSIM_STATE_INITIALIZED; + + return 0; +} + +static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + int ret; + + if (dsi->state & DSIM_STATE_ENABLED) + return; + + ret = pm_runtime_resume_and_get(dsi->dev); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable DSI device.\n"); + return; + } + + dsi->state |= DSIM_STATE_ENABLED; + + /* + * For Exynos-DSIM the downstream bridge, or panel are expecting + * the host initialization during DSI transfer. 
+ */ + if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { + ret = samsung_dsim_init(dsi); + if (ret) + return; + } +} + +static void samsung_dsim_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + + samsung_dsim_set_display_mode(dsi); + samsung_dsim_set_display_enable(dsi, true); + + dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; +} + +static void samsung_dsim_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + + if (!(dsi->state & DSIM_STATE_ENABLED)) + return; + + dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; +} + +static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + + samsung_dsim_set_display_enable(dsi, false); + + dsi->state &= ~DSIM_STATE_ENABLED; + pm_runtime_put_sync(dsi->dev); +} + +/* + * This list of pixel output formats is referenced from + * AN13573 i.MX 8/RT MIPI DSI/CSI-2, Rev. 0, 21 March 2022 + * 3.7.4 Pixel formats + * Table 14. DSI pixel packing formats + */ +static const u32 samsung_dsim_pixel_output_fmts[] = { + MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_YUYV12_1X24, + MEDIA_BUS_FMT_UYVY8_1X16, + MEDIA_BUS_FMT_RGB101010_1X30, + MEDIA_BUS_FMT_RGB121212_1X36, + MEDIA_BUS_FMT_RGB565_1X16, + MEDIA_BUS_FMT_RGB666_1X18, + MEDIA_BUS_FMT_RGB888_1X24, +}; + +static bool samsung_dsim_pixel_output_fmt_supported(u32 fmt) +{ + int i; + + if (fmt == MEDIA_BUS_FMT_FIXED) + return false; + + for (i = 0; i < ARRAY_SIZE(samsung_dsim_pixel_output_fmts); i++) { + if (samsung_dsim_pixel_output_fmts[i] == fmt) + return true; + } + + return false; +} + +static u32 * +samsung_dsim_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts; + + input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL); + if (!input_fmts) + return NULL; + + if (!samsung_dsim_pixel_output_fmt_supported(output_fmt)) + /* + * Some bridge/display drivers are still not able to pass the + * correct format, so handle those pipelines by falling back + * to the default format until the supported formats are finalized. + */ + output_fmt = MEDIA_BUS_FMT_RGB888_1X24; + + input_fmts[0] = output_fmt; + *num_input_fmts = 1; + + return input_fmts; +} + +static int samsung_dsim_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + + /* + * The i.MX8M Mini/Nano glue logic between LCDIF and DSIM + * inverts the polarity of the HS/VS/DE sync signals, therefore, while + * i.MX 8M Mini Applications Processor Reference Manual Rev. 3, 11/2020 + * 13.6.3.5.2 RGB interface + * i.MX 8M Nano Applications Processor Reference Manual Rev. 2, 07/2022 + * 13.6.2.7.2 RGB interface + * both claim "Vsync, Hsync, and VDEN are active high signals.", the + * LCDIF must generate inverted HS/VS/DE signals, i.e. active LOW. + * + * The i.MX8M Plus glue logic between LCDIFv3 and DSIM does not + * implement the same behavior, therefore LCDIFv3 must generate + * HS/VS/DE signals active HIGH.
+ */ + if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) { + adjusted_mode->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); + adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + } else if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MP) { + adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); + adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); + } + + return 0; +} + +static void samsung_dsim_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + + drm_mode_copy(&dsi->mode, adjusted_mode); +} + +static int samsung_dsim_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct samsung_dsim *dsi = bridge_to_dsi(bridge); + + return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge, + flags); +} + +static const struct drm_bridge_funcs samsung_dsim_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_get_input_bus_fmts = samsung_dsim_atomic_get_input_bus_fmts, + .atomic_check = samsung_dsim_atomic_check, + .atomic_pre_enable = samsung_dsim_atomic_pre_enable, + .atomic_enable = samsung_dsim_atomic_enable, + .atomic_disable = samsung_dsim_atomic_disable, + .atomic_post_disable = samsung_dsim_atomic_post_disable, + .mode_set = samsung_dsim_mode_set, + .attach = samsung_dsim_attach, +}; + +static irqreturn_t samsung_dsim_te_irq_handler(int irq, void *dev_id) +{ + struct samsung_dsim *dsi = (struct samsung_dsim *)dev_id; + const struct samsung_dsim_plat_data *pdata = dsi->plat_data; + + if (pdata->host_ops && pdata->host_ops->te_irq_handler) + return pdata->host_ops->te_irq_handler(dsi); + + return IRQ_HANDLED; +} + +static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device *dev) +{ + int te_gpio_irq; + int ret; + + dsi->te_gpio = devm_gpiod_get_optional(dev, "te", GPIOD_IN); + if (!dsi->te_gpio) + return 0; + else if (IS_ERR(dsi->te_gpio)) + return dev_err_probe(dev, PTR_ERR(dsi->te_gpio), "failed to get te GPIO\n"); + + te_gpio_irq = gpiod_to_irq(dsi->te_gpio); + + ret = request_threaded_irq(te_gpio_irq, samsung_dsim_te_irq_handler, NULL, + IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi); + if (ret) { + dev_err(dsi->dev, "request interrupt failed with %d\n", ret); + gpiod_put(dsi->te_gpio); + return ret; + } + + return 0; +} + +static int samsung_dsim_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct samsung_dsim *dsi = host_to_dsi(host); + const struct samsung_dsim_plat_data *pdata = dsi->plat_data; + struct device *dev = dsi->dev; + struct device_node *np = dev->of_node; + struct device_node *remote; + struct drm_panel *panel; + int ret; + + /* + * Devices can also be child nodes when we also control that device + * through the upstream device (i.e., MIPI-DCS for a MIPI-DSI device). + * + * Look for a child node of the given parent that isn't named either port + * or ports.
+ */ + for_each_available_child_of_node(np, remote) { + if (of_node_name_eq(remote, "port") || + of_node_name_eq(remote, "ports")) + continue; + + goto of_find_panel_or_bridge; + } + + /* + * of_graph_get_remote_node() produces a noisy error message if the port + * node isn't found, and the absence of the port is a legitimate case here, + * so first silently check whether a graph is present in the + * device-tree node. + */ + if (!of_graph_is_present(np)) + return -ENODEV; + + remote = of_graph_get_remote_node(np, 1, 0); + +of_find_panel_or_bridge: + if (!remote) + return -ENODEV; + + panel = of_drm_find_panel(remote); + if (!IS_ERR(panel)) { + dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel); + } else { + dsi->out_bridge = of_drm_find_bridge(remote); + if (!dsi->out_bridge) + dsi->out_bridge = ERR_PTR(-EINVAL); + } + + of_node_put(remote); + + if (IS_ERR(dsi->out_bridge)) { + ret = PTR_ERR(dsi->out_bridge); + DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret); + return ret; + } + + DRM_DEV_INFO(dev, "Attached %s device\n", device->name); + + drm_bridge_add(&dsi->bridge); + + /* + * This is a temporary solution and should be replaced with a more generic approach. + * + * If the attached panel device operates in command mode, the DSI host + * should register a TE interrupt handler. + */ + if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) { + ret = samsung_dsim_register_te_irq(dsi, &device->dev); + if (ret) + return ret; + } + + if (pdata->host_ops && pdata->host_ops->attach) { + ret = pdata->host_ops->attach(dsi, device); + if (ret) + return ret; + } + + dsi->lanes = device->lanes; + dsi->format = device->format; + dsi->mode_flags = device->mode_flags; + + return 0; +} + +static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) +{ + if (dsi->te_gpio) { + free_irq(gpiod_to_irq(dsi->te_gpio), dsi); + gpiod_put(dsi->te_gpio); + } +} + +static int samsung_dsim_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct samsung_dsim *dsi = host_to_dsi(host); + const struct samsung_dsim_plat_data *pdata = dsi->plat_data; + + dsi->out_bridge = NULL; + + if (pdata->host_ops && pdata->host_ops->detach) + pdata->host_ops->detach(dsi, device); + + samsung_dsim_unregister_te_irq(dsi); + + drm_bridge_remove(&dsi->bridge); + + return 0; +} + +static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct samsung_dsim *dsi = host_to_dsi(host); + struct samsung_dsim_transfer xfer; + int ret; + + if (!(dsi->state & DSIM_STATE_ENABLED)) + return -EINVAL; + + ret = samsung_dsim_init(dsi); + if (ret) + return ret; + + ret = mipi_dsi_create_packet(&xfer.packet, msg); + if (ret < 0) + return ret; + + xfer.rx_len = msg->rx_len; + xfer.rx_payload = msg->rx_buf; + xfer.flags = msg->flags; + + ret = samsung_dsim_transfer(dsi, &xfer); + return (ret < 0) ?
ret : xfer.rx_done; +} + +static const struct mipi_dsi_host_ops samsung_dsim_ops = { + .attach = samsung_dsim_host_attach, + .detach = samsung_dsim_host_detach, + .transfer = samsung_dsim_host_transfer, +}; + +static int samsung_dsim_of_read_u32(const struct device_node *np, + const char *propname, u32 *out_value) +{ + int ret = of_property_read_u32(np, propname, out_value); + + if (ret < 0) + pr_err("%pOF: failed to get '%s' property\n", np, propname); + + return ret; +} + +static int samsung_dsim_parse_dt(struct samsung_dsim *dsi) +{ + struct device *dev = dsi->dev; + struct device_node *node = dev->of_node; + int ret; + + ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency", + &dsi->pll_clk_rate); + if (ret < 0) + return ret; + + ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency", + &dsi->burst_clk_rate); + if (ret < 0) + return ret; + + ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency", + &dsi->esc_clk_rate); + if (ret < 0) + return ret; + + return 0; +} + +static int generic_dsim_register_host(struct samsung_dsim *dsi) +{ + return mipi_dsi_host_register(&dsi->dsi_host); +} + +static void generic_dsim_unregister_host(struct samsung_dsim *dsi) +{ + mipi_dsi_host_unregister(&dsi->dsi_host); +} + +static const struct samsung_dsim_host_ops generic_dsim_host_ops = { + .register_host = generic_dsim_register_host, + .unregister_host = generic_dsim_unregister_host, +}; + +static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_high = { + .input_bus_flags = DRM_BUS_FLAG_DE_HIGH, +}; + +static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_low = { + .input_bus_flags = DRM_BUS_FLAG_DE_LOW, +}; + +int samsung_dsim_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct samsung_dsim *dsi; + int ret, i; + + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + init_completion(&dsi->completed); + spin_lock_init(&dsi->transfer_lock); + INIT_LIST_HEAD(&dsi->transfer_list); + + dsi->dsi_host.ops = &samsung_dsim_ops; + dsi->dsi_host.dev = dev; + + dsi->dev = dev; + dsi->plat_data = of_device_get_match_data(dev); + dsi->driver_data = samsung_dsim_types[dsi->plat_data->hw_type]; + + dsi->supplies[0].supply = "vddcore"; + dsi->supplies[1].supply = "vddio"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), + dsi->supplies); + if (ret) + return dev_err_probe(dev, ret, "failed to get regulators\n"); + + dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks, + sizeof(*dsi->clks), GFP_KERNEL); + if (!dsi->clks) + return -ENOMEM; + + for (i = 0; i < dsi->driver_data->num_clks; i++) { + dsi->clks[i] = devm_clk_get(dev, clk_names[i]); + if (IS_ERR(dsi->clks[i])) { + if (strcmp(clk_names[i], "sclk_mipi") == 0) { + dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME); + if (!IS_ERR(dsi->clks[i])) + continue; + } + + dev_info(dev, "failed to get the clock: %s\n", clk_names[i]); + return PTR_ERR(dsi->clks[i]); + } + } + + dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dsi->reg_base)) + return PTR_ERR(dsi->reg_base); + + dsi->phy = devm_phy_optional_get(dev, "dsim"); + if (IS_ERR(dsi->phy)) { + dev_info(dev, "failed to get dsim phy\n"); + return PTR_ERR(dsi->phy); + } + + dsi->irq = platform_get_irq(pdev, 0); + if (dsi->irq < 0) + return dsi->irq; + + ret = devm_request_threaded_irq(dev, dsi->irq, NULL, + samsung_dsim_irq, + IRQF_ONESHOT | IRQF_NO_AUTOEN, + dev_name(dev), dsi); + if (ret) { + dev_err(dev, "failed to request dsi irq\n"); + 
return ret; + } + + ret = samsung_dsim_parse_dt(dsi); + if (ret) + return ret; + + platform_set_drvdata(pdev, dsi); + + pm_runtime_enable(dev); + + dsi->bridge.funcs = &samsung_dsim_bridge_funcs; + dsi->bridge.of_node = dev->of_node; + dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; + + /* DE_LOW: i.MX8M Mini/Nano LCDIF-DSIM glue logic inverts HS/VS/DE */ + if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) + dsi->bridge.timings = &samsung_dsim_bridge_timings_de_low; + else + dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high; + + if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host) + ret = dsi->plat_data->host_ops->register_host(dsi); + + if (ret) + goto err_disable_runtime; + + return 0; + +err_disable_runtime: + pm_runtime_disable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(samsung_dsim_probe); + +int samsung_dsim_remove(struct platform_device *pdev) +{ + struct samsung_dsim *dsi = platform_get_drvdata(pdev); + + pm_runtime_disable(&pdev->dev); + + if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host) + dsi->plat_data->host_ops->unregister_host(dsi); + + return 0; +} +EXPORT_SYMBOL_GPL(samsung_dsim_remove); + +static int __maybe_unused samsung_dsim_suspend(struct device *dev) +{ + struct samsung_dsim *dsi = dev_get_drvdata(dev); + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + int ret, i; + + usleep_range(10000, 20000); + + if (dsi->state & DSIM_STATE_INITIALIZED) { + dsi->state &= ~DSIM_STATE_INITIALIZED; + + samsung_dsim_disable_clock(dsi); + + samsung_dsim_disable_irq(dsi); + } + + dsi->state &= ~DSIM_STATE_CMD_LPM; + + phy_power_off(dsi->phy); + + for (i = driver_data->num_clks - 1; i > -1; i--) + clk_disable_unprepare(dsi->clks[i]); + + ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + if (ret < 0) + dev_err(dsi->dev, "cannot disable regulators %d\n", ret); + + return 0; +} + +static int __maybe_unused samsung_dsim_resume(struct device *dev) +{ + struct samsung_dsim *dsi = dev_get_drvdata(dev); + const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; + int ret, i; + + ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + if (ret < 0) { + dev_err(dsi->dev, "cannot enable regulators %d\n", ret); + return ret; + } + + for (i = 0; i < driver_data->num_clks; i++) { + ret = clk_prepare_enable(dsi->clks[i]); + if (ret < 0) + goto err_clk; + } + + ret = phy_power_on(dsi->phy); + if (ret < 0) { + dev_err(dsi->dev, "cannot enable phy %d\n", ret); + goto err_clk; + } + + return 0; + +err_clk: + while (--i > -1) + clk_disable_unprepare(dsi->clks[i]); + regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); + + return ret; +} + +const struct dev_pm_ops samsung_dsim_pm_ops = { + SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; +EXPORT_SYMBOL_GPL(samsung_dsim_pm_ops); + +static const struct samsung_dsim_plat_data samsung_dsim_imx8mm_pdata = { + .hw_type = DSIM_TYPE_IMX8MM, + .host_ops = &generic_dsim_host_ops, +}; + +static const struct samsung_dsim_plat_data samsung_dsim_imx8mp_pdata = { + .hw_type = DSIM_TYPE_IMX8MP, + .host_ops = &generic_dsim_host_ops, +}; + +static const struct of_device_id samsung_dsim_of_match[] = { + { + .compatible = "fsl,imx8mm-mipi-dsim", + .data = &samsung_dsim_imx8mm_pdata, + }, + { + .compatible = "fsl,imx8mp-mipi-dsim", + .data = &samsung_dsim_imx8mp_pdata, + }, + { /* sentinel. 
*/ } +}; +MODULE_DEVICE_TABLE(of, samsung_dsim_of_match); + +static struct platform_driver samsung_dsim_driver = { + .probe = samsung_dsim_probe, + .remove = samsung_dsim_remove, + .driver = { + .name = "samsung-dsim", + .owner = THIS_MODULE, + .pm = &samsung_dsim_pm_ops, + .of_match_table = samsung_dsim_of_match, + }, +}; + +module_platform_driver(samsung_dsim_driver); + +MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>"); +MODULE_DESCRIPTION("Samsung MIPI DSIM controller bridge"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c index 099b510ff285..2d17f227867b 100644 --- a/drivers/gpu/drm/bridge/sii9234.c +++ b/drivers/gpu/drm/bridge/sii9234.c @@ -867,11 +867,6 @@ static int sii9234_init_resources(struct sii9234 *ctx, return 0; } -static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge) -{ - return container_of(bridge, struct sii9234, bridge); -} - static enum drm_mode_status sii9234_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index 2c5c5211bdab..d85d9ee463b8 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -202,11 +202,9 @@ static int simple_bridge_probe(struct platform_device *pdev) sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(sbridge->enable)) { - if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n"); - return PTR_ERR(sbridge->enable); - } + if (IS_ERR(sbridge->enable)) + return dev_err_probe(&pdev->dev, PTR_ERR(sbridge->enable), + "Unable to retrieve enable GPIO\n"); /* Register the bridge. 
*/ sbridge->bridge.funcs = &simple_bridge_bridge_funcs; @@ -218,13 +216,11 @@ static int simple_bridge_probe(struct platform_device *pdev) return 0; } -static int simple_bridge_remove(struct platform_device *pdev) +static void simple_bridge_remove(struct platform_device *pdev) { struct simple_bridge *sbridge = platform_get_drvdata(pdev); drm_bridge_remove(&sbridge->bridge); - - return 0; } /* @@ -301,7 +297,7 @@ MODULE_DEVICE_TABLE(of, simple_bridge_match); static struct platform_driver simple_bridge_driver = { .probe = simple_bridge_probe, - .remove = simple_bridge_remove, + .remove_new = simple_bridge_remove, .driver = { .name = "simple-bridge", .of_match_table = simple_bridge_match, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 4efb62bcdb63..67b8d17a722a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -584,13 +584,11 @@ err: return ret; } -static int snd_dw_hdmi_remove(struct platform_device *pdev) +static void snd_dw_hdmi_remove(struct platform_device *pdev) { struct snd_dw_hdmi *dw = platform_get_drvdata(pdev); snd_card_free(dw->card); - - return 0; } #if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN) @@ -625,7 +623,7 @@ static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend, static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove = snd_dw_hdmi_remove, + .remove_new = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, .pm = PM_OPS, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index c8f44bcb298a..9389ce526eb1 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -296,19 +296,17 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev) return 0; } -static int dw_hdmi_cec_remove(struct platform_device *pdev) +static void dw_hdmi_cec_remove(struct platform_device *pdev) { struct dw_hdmi_cec *cec = platform_get_drvdata(pdev); cec_notifier_cec_adap_unregister(cec->notify, cec->adap); cec_unregister_adapter(cec->adap); - - return 0; } static struct platform_driver dw_hdmi_cec_driver = { .probe = dw_hdmi_cec_probe, - .remove = dw_hdmi_cec_remove, + .remove_new = dw_hdmi_cec_remove, .driver = { .name = "dw-hdmi-cec", }, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c index 557966239677..423762da2ab4 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -172,18 +172,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(dw->audio_pdev); } -static int snd_dw_hdmi_remove(struct platform_device *pdev) +static void snd_dw_hdmi_remove(struct platform_device *pdev) { struct snd_dw_hdmi *dw = platform_get_drvdata(pdev); platform_device_unregister(dw->audio_pdev); - - return 0; } static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove = snd_dw_hdmi_remove, + .remove_new = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, }, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index d751820c6da6..26c187d20d97 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -216,18 +216,16 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev) 
return 0; } -static int snd_dw_hdmi_remove(struct platform_device *pdev) +static void snd_dw_hdmi_remove(struct platform_device *pdev) { struct platform_device *platform = dev_get_drvdata(&pdev->dev); platform_device_unregister(platform); - - return 0; } static struct platform_driver snd_dw_hdmi_driver = { .probe = snd_dw_hdmi_probe, - .remove = snd_dw_hdmi_remove, + .remove_new = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, }, diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index e21078b2f8b5..d4c1a601bbb5 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -230,13 +230,11 @@ static int thc63_probe(struct platform_device *pdev) return 0; } -static int thc63_remove(struct platform_device *pdev) +static void thc63_remove(struct platform_device *pdev) { struct thc63_dev *thc63 = platform_get_drvdata(pdev); drm_bridge_remove(&thc63->bridge); - - return 0; } static const struct of_device_id thc63_match[] = { @@ -247,7 +245,7 @@ MODULE_DEVICE_TABLE(of, thc63_match); static struct platform_driver thc63_driver = { .probe = thc63_probe, - .remove = thc63_remove, + .remove_new = thc63_remove, .driver = { .name = "thc63lvd1024", .of_match_table = thc63_match, diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 6db69df0e18b..ab63225cd635 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -355,11 +355,9 @@ static int tfp410_probe(struct platform_device *pdev) return tfp410_init(&pdev->dev, false); } -static int tfp410_remove(struct platform_device *pdev) +static void tfp410_remove(struct platform_device *pdev) { tfp410_fini(&pdev->dev); - - return 0; } static const struct of_device_id tfp410_match[] = { @@ -370,7 +368,7 @@ MODULE_DEVICE_TABLE(of, tfp410_match); static struct platform_driver tfp410_platform_driver = { .probe = tfp410_probe, - .remove = tfp410_remove, + .remove_new = tfp410_remove, .driver = { .name = "tfp410-bridge", .of_match_table = tfp410_match, diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 8606876f7233..d4d2a2ce40f8 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1511,6 +1511,41 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); +/* + * For atomic updates which touch just a single CRTC, calculate the time of the + * next vblank, and inform all the fences of the deadline. 
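+ * + * Continuation of the comment above, clarifying the intent of the new helper: the vblank time computed here is only a scheduling hint. dma_fence_set_deadline() (called below) passes it to the fence signaller, for example a GPU driver, which may use it to boost clocks so the fence signals before the next vblank; missing the deadline is not treated as an error.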
+ */ +static void set_fence_deadline(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + ktime_t vbltime = 0; + int i; + + for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + ktime_t v; + + if (drm_crtc_next_vblank_start(crtc, &v)) + continue; + + if (!vbltime || ktime_before(v, vbltime)) + vbltime = v; + } + + /* If no CRTCs updated, then nothing to do: */ + if (!vbltime) + return; + + for_each_new_plane_in_state (state, plane, new_plane_state, i) { + if (!new_plane_state->fence) + continue; + dma_fence_set_deadline(new_plane_state->fence, vbltime); + } +} + /** * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state * @dev: DRM device @@ -1540,6 +1575,8 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_plane_state *new_plane_state; int i, ret; + set_fence_deadline(dev, state); + for_each_new_plane_in_state(state, plane, new_plane_state, i) { if (!new_plane_state->fence) continue; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 4b12c7a39ee3..48df7a5ea503 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1452,7 +1452,7 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * * left margin, right margin, top margin, bottom margin: * Add margins to the connector's viewport. This is typically used to - * mitigate underscan on TVs. + * mitigate overscan on TVs. * * The value is the size in pixels of the black border which will be * added. The attached CRTC's content will be scaled to fill the whole diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index c6eb8972451a..cee0cc522ed9 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -691,9 +691,11 @@ static int drm_dev_init(struct drm_device *dev, } } - ret = drm_dev_set_unique(dev, dev_name(parent)); - if (ret) + dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL); + if (!dev->unique) { + ret = -ENOMEM; goto err; + } return 0; @@ -1000,26 +1002,6 @@ void drm_dev_unregister(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unregister); -/** - * drm_dev_set_unique - Set the unique name of a DRM device - * @dev: device of which to set the unique name - * @name: unique name - * - * Sets the unique name of a DRM device using the specified string. This is - * already done by drm_dev_init(), drivers should only override the default - * unique name for backwards compatibility reasons. - * - * Return: 0 on success or a negative error code on failure. - */ -int drm_dev_set_unique(struct drm_device *dev, const char *name) -{ - drmm_kfree(dev, dev->unique); - dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL); - - return dev->unique ? 0 : -ENOMEM; -} -EXPORT_SYMBOL(drm_dev_set_unique); - /* * DRM Core * The DRM core module initializes all global DRM objects and makes them diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c18ec866678d..0454da505687 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter) * the EDID then we'll just return 0. 
*/ - base_block = kmalloc(EDID_LENGTH, GFP_KERNEL); + base_block = kzalloc(EDID_LENGTH, GFP_KERNEL); if (!base_block) return 0; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a39998047f8a..63ec95e86d0e 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -60,16 +60,17 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, * In order to keep user-space compatibility, we want in certain use-cases * to keep leaking the fbdev physical address to the user-space program * handling the fbdev buffer. - * This is a bad habit essentially kept into closed source opengl driver - * that should really be moved into open-source upstream projects instead - * of using legacy physical addresses in user space to communicate with - * other out-of-tree kernel modules. + * + * This is a bad habit, essentially kept to support closed-source OpenGL + * drivers that should really be moved into open-source upstream projects + * instead of using legacy physical addresses in user space to communicate + * with other out-of-tree kernel modules. * * This module_param *should* be removed as soon as possible and be * considered as a broken and legacy behaviour from a modern fbdev device. */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) static bool drm_leak_fbdev_smem; +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); MODULE_PARM_DESC(drm_leak_fbdev_smem, "Allow unsafe leaking fbdev physical smem address [default=false]"); @@ -539,6 +540,29 @@ err_release: EXPORT_SYMBOL(drm_fb_helper_alloc_info); /** + * drm_fb_helper_release_info - release fb_info and its members + * @fb_helper: driver-allocated fbdev helper + * + * A helper to release fb_info and the member cmap. Drivers do not + * need to release the allocated fb_info structure themselves, this is + * automatically done when calling drm_fb_helper_fini(). + */ +void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper) +{ + struct fb_info *info = fb_helper->info; + + if (!info) + return; + + fb_helper->info = NULL; + + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); +} +EXPORT_SYMBOL(drm_fb_helper_release_info); + +/** * drm_fb_helper_unregister_info - unregister fb_info framebuffer device * @fb_helper: driver-allocated fbdev helper, can be NULL * @@ -561,8 +585,6 @@ EXPORT_SYMBOL(drm_fb_helper_unregister_info); */ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) { - struct fb_info *info; - if (!fb_helper) return; @@ -574,13 +596,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) cancel_work_sync(&fb_helper->resume_work); cancel_work_sync(&fb_helper->damage_work); - info = fb_helper->info; - if (info) { - if (info->cmap.len) - fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - } - fb_helper->info = NULL; + drm_fb_helper_release_info(fb_helper); mutex_lock(&kernel_fb_helper_lock); if (!list_empty(&fb_helper->kernel_fb_list)) { @@ -657,7 +673,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist) { struct drm_fb_helper *helper = info->par; - unsigned long start, end, min_off, max_off; + unsigned long start, end, min_off, max_off, total_size; struct fb_deferred_io_pageref *pageref; struct drm_rect damage_area; @@ -675,7 +691,11 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli * of the screen and account for non-existing scanlines. 
Hence, * keep the covered memory area within the screen buffer. */ - max_off = min(max_off, info->screen_size); + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + max_off = min(max_off, total_size); if (min_off < max_off) { drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); @@ -1964,10 +1984,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper) return ret; } -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem; -#endif - /* push down into drivers */ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); if (ret < 0) @@ -2166,11 +2182,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper) info = fb_helper->info; info->var.pixclock = 0; - /* Shamelessly allow physical address leaking to userspace */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (!fb_helper->hint_leak_smem_start) -#endif - /* don't leak any physical addresses to userspace */ + + if (!drm_leak_fbdev_smem) info->flags |= FBINFO_HIDE_SMEM_START; /* Need to drop locks to avoid recursive deadlock in diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index cf553ac12a0f..728deffcc0d9 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -136,16 +136,9 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper, info->flags |= FBINFO_READS_FAST; /* signal caching */ info->screen_size = sizes->surface_height * fb->pitches[0]; info->screen_buffer = map.vaddr; + info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); info->fix.smem_len = info->screen_size; -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - /* - * Shamelessly leak the physical address to user-space. 
- */ - if (fb_helper->hint_leak_smem_start && !info->fix.smem_start) - info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); -#endif - return 0; err_drm_client_buffer_vunmap: diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c index 4d6325e91565..8e5148bf40bb 100644 --- a/drivers/gpu/drm/drm_fbdev_generic.c +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -7,22 +7,13 @@ #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem.h> #include <drm/drm_print.h> #include <drm/drm_fbdev_generic.h> -static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) -{ - struct drm_device *dev = fb_helper->dev; - struct drm_framebuffer *fb = fb_helper->fb; - - return dev->mode_config.prefer_shadow_fbdev || - dev->mode_config.prefer_shadow || - fb->funcs->dirty; -} - /* @user: 1=userspace, 0=fbcon */ -static int drm_fbdev_fb_open(struct fb_info *info, int user) +static int drm_fbdev_generic_fb_open(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; @@ -33,7 +24,7 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user) return 0; } -static int drm_fbdev_fb_release(struct fb_info *info, int user) +static int drm_fbdev_generic_fb_release(struct fb_info *info, int user) { struct drm_fb_helper *fb_helper = info->par; @@ -43,133 +34,51 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user) return 0; } -static void drm_fbdev_fb_destroy(struct fb_info *info) +static void drm_fbdev_generic_fb_destroy(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - void *shadow = NULL; + void *shadow = info->screen_buffer; if (!fb_helper->dev) return; - if (info->fbdefio) - fb_deferred_io_cleanup(info); - if (drm_fbdev_use_shadow_fb(fb_helper)) - shadow = info->screen_buffer; - + fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); - - if (shadow) - vfree(shadow); - else if (fb_helper->buffer) - drm_client_buffer_vunmap(fb_helper->buffer); - + vfree(shadow); drm_client_framebuffer_delete(fb_helper->buffer); - drm_client_release(&fb_helper->client); + drm_client_release(&fb_helper->client); drm_fb_helper_unprepare(fb_helper); kfree(fb_helper); } -static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) -{ - struct drm_fb_helper *fb_helper = info->par; - - if (drm_fbdev_use_shadow_fb(fb_helper)) - return fb_deferred_io_mmap(info, vma); - else if (fb_helper->dev->driver->gem_prime_mmap) - return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); - else - return -ENODEV; -} - -static bool drm_fbdev_use_iomem(struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_client_buffer *buffer = fb_helper->buffer; - - return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; -} - -static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t ret; - - if (drm_fbdev_use_iomem(info)) - ret = drm_fb_helper_cfb_read(info, buf, count, ppos); - else - ret = drm_fb_helper_sys_read(info, buf, count, ppos); - - return ret; -} - -static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t ret; - - if (drm_fbdev_use_iomem(info)) - ret = drm_fb_helper_cfb_write(info, buf, count, ppos); - else - ret = drm_fb_helper_sys_write(info, buf, count, ppos); - - return ret; -} - -static void drm_fbdev_fb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ - if 
(drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_fillrect(info, rect); - else - drm_fb_helper_sys_fillrect(info, rect); -} - -static void drm_fbdev_fb_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ - if (drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_copyarea(info, area); - else - drm_fb_helper_sys_copyarea(info, area); -} - -static void drm_fbdev_fb_imageblit(struct fb_info *info, - const struct fb_image *image) -{ - if (drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_imageblit(info, image); - else - drm_fb_helper_sys_imageblit(info, image); -} - -static const struct fb_ops drm_fbdev_fb_ops = { +static const struct fb_ops drm_fbdev_generic_fb_ops = { .owner = THIS_MODULE, + .fb_open = drm_fbdev_generic_fb_open, + .fb_release = drm_fbdev_generic_fb_release, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, DRM_FB_HELPER_DEFAULT_OPS, - .fb_open = drm_fbdev_fb_open, - .fb_release = drm_fbdev_fb_release, - .fb_destroy = drm_fbdev_fb_destroy, - .fb_mmap = drm_fbdev_fb_mmap, - .fb_read = drm_fbdev_fb_read, - .fb_write = drm_fbdev_fb_write, - .fb_fillrect = drm_fbdev_fb_fillrect, - .fb_copyarea = drm_fbdev_fb_copyarea, - .fb_imageblit = drm_fbdev_fb_imageblit, + .fb_fillrect = drm_fb_helper_sys_fillrect, + .fb_copyarea = drm_fb_helper_sys_copyarea, + .fb_imageblit = drm_fb_helper_sys_imageblit, + .fb_mmap = fb_deferred_io_mmap, + .fb_destroy = drm_fbdev_generic_fb_destroy, }; /* * This function uses the client API to create a framebuffer backed by a dumb buffer. */ -static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) +static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; struct drm_client_buffer *buffer; - struct drm_framebuffer *fb; struct fb_info *info; + size_t screen_size; + void *screen_buffer; u32 format; - struct iosys_map map; int ret; drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", @@ -184,64 +93,56 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper, fb_helper->buffer = buffer; fb_helper->fb = buffer->fb; - fb = buffer->fb; - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) - return PTR_ERR(info); + screen_size = buffer->gem->size; + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) { + ret = -ENOMEM; + goto err_drm_client_framebuffer_delete; + } - info->fbops = &drm_fbdev_fb_ops; - info->screen_size = sizes->surface_height * fb->pitches[0]; - info->fix.smem_len = info->screen_size; - info->flags = FBINFO_DEFAULT; + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_vfree; + } drm_fb_helper_fill_info(info, fb_helper, sizes); - if (drm_fbdev_use_shadow_fb(fb_helper)) { - info->screen_buffer = vzalloc(info->screen_size); - if (!info->screen_buffer) - return -ENOMEM; - info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + info->fbops = &drm_fbdev_generic_fb_ops; + info->flags = FBINFO_DEFAULT; - /* Set a default deferred I/O handler */ - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + /* screen */ + info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + info->screen_buffer = screen_buffer; + info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer)); + info->fix.smem_len = screen_size; - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) 
- return ret; - } else { - /* buffer is mapped for HW framebuffer */ - ret = drm_client_buffer_vmap(fb_helper->buffer, &map); - if (ret) - return ret; - if (map.is_iomem) { - info->screen_base = map.vaddr_iomem; - } else { - info->screen_buffer = map.vaddr; - info->flags |= FBINFO_VIRTFB; - } - - /* - * Shamelessly leak the physical address to user-space. As - * page_to_phys() is undefined for I/O memory, warn in this - * case. - */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 && - !drm_WARN_ON_ONCE(dev, map.is_iomem)) - info->fix.smem_start = - page_to_phys(virt_to_page(info->screen_buffer)); -#endif - } + /* deferred I/O */ + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_drm_fb_helper_release_info; return 0; + +err_drm_fb_helper_release_info: + drm_fb_helper_release_info(fb_helper); +err_vfree: + vfree(screen_buffer); +err_drm_client_framebuffer_delete: + fb_helper->fb = NULL; + fb_helper->buffer = NULL; + drm_client_framebuffer_delete(buffer); + return ret; } -static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip, - struct iosys_map *dst) +static void drm_fbdev_generic_damage_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip, + struct iosys_map *dst) { struct drm_framebuffer *fb = fb_helper->fb; size_t offset = clip->y1 * fb->pitches[0]; @@ -278,8 +179,8 @@ static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper, } } -static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip) +static int drm_fbdev_generic_damage_blit(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) { struct drm_client_buffer *buffer = fb_helper->buffer; struct iosys_map map, dst; @@ -303,7 +204,7 @@ static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper, goto out; dst = map; - drm_fbdev_damage_blit_real(fb_helper, clip, &dst); + drm_fbdev_generic_damage_blit_real(fb_helper, clip, &dst); drm_client_buffer_vunmap(buffer); @@ -313,23 +214,19 @@ out: return ret; } -static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) +static int drm_fbdev_generic_helper_fb_dirty(struct drm_fb_helper *helper, + struct drm_clip_rect *clip) { struct drm_device *dev = helper->dev; int ret; - if (!drm_fbdev_use_shadow_fb(helper)) - return 0; - /* Call damage handlers only if necessary */ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) return 0; - if (helper->buffer) { - ret = drm_fbdev_damage_blit(helper, clip); - if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) - return ret; - } + ret = drm_fbdev_generic_damage_blit(helper, clip); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + return ret; if (helper->fb->funcs->dirty) { ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); @@ -340,12 +237,12 @@ static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect return 0; } -static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { - .fb_probe = drm_fbdev_fb_probe, - .fb_dirty = drm_fbdev_fb_dirty, +static const struct drm_fb_helper_funcs drm_fbdev_generic_helper_funcs = { + .fb_probe = drm_fbdev_generic_helper_fb_probe, + .fb_dirty = drm_fbdev_generic_helper_fb_dirty, }; -static void drm_fbdev_client_unregister(struct drm_client_dev *client) +static void 
drm_fbdev_generic_client_unregister(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); @@ -358,14 +255,14 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client) } } -static int drm_fbdev_client_restore(struct drm_client_dev *client) +static int drm_fbdev_generic_client_restore(struct drm_client_dev *client) { drm_fb_helper_lastclose(client->dev); return 0; } -static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +static int drm_fbdev_generic_client_hotplug(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); struct drm_device *dev = client->dev; @@ -394,11 +291,11 @@ err_drm_err: return ret; } -static const struct drm_client_funcs drm_fbdev_client_funcs = { +static const struct drm_client_funcs drm_fbdev_generic_client_funcs = { .owner = THIS_MODULE, - .unregister = drm_fbdev_client_unregister, - .restore = drm_fbdev_client_restore, - .hotplug = drm_fbdev_client_hotplug, + .unregister = drm_fbdev_generic_client_unregister, + .restore = drm_fbdev_generic_client_restore, + .hotplug = drm_fbdev_generic_client_hotplug, }; /** @@ -415,20 +312,16 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. * Simple drivers might use drm_mode_config_helper_suspend(). * - * Drivers that set the dirty callback on their framebuffer will get a shadow - * fbdev buffer that is blitted onto the real buffer. This is done in order to - * make deferred I/O work with all kinds of buffers. A shadow buffer can be - * requested explicitly by setting struct drm_mode_config.prefer_shadow or - * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is - * required to use generic fbdev emulation with SHMEM helpers. + * In order to provide fixed mmap-able memory ranges, generic fbdev emulation + * uses a shadow buffer in system memory. The implementation blits the shadow + * fbdev buffer onto the real buffer in regular intervals. * * This function is safe to call even when there are no connectors present. * Setup will be retried on the next hotplug event. * * The fbdev is destroyed by drm_dev_unregister(). 
*/ -void drm_fbdev_generic_setup(struct drm_device *dev, - unsigned int preferred_bpp) +void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) { struct drm_fb_helper *fb_helper; int ret; @@ -439,15 +332,15 @@ void drm_fbdev_generic_setup(struct drm_device *dev, fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); if (!fb_helper) return; - drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs); + drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fbdev_generic_helper_funcs); - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_generic_client_funcs); if (ret) { drm_err(dev, "Failed to register client: %d\n", ret); goto err_drm_client_init; } - ret = drm_fbdev_client_hotplug(&fb_helper->client); + ret = drm_fbdev_generic_client_hotplug(&fb_helper->client); if (ret) drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ee3e11e7177d..a6208e2c089b 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1381,10 +1381,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail); * * @lru: The LRU to scan * @nr_to_scan: The number of pages to try to reclaim + * @remaining: The number of pages left to reclaim, should be initialized by caller * @shrink: Callback to try to shrink/reclaim the object. */ unsigned long -drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, +drm_gem_lru_scan(struct drm_gem_lru *lru, + unsigned int nr_to_scan, + unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)) { struct drm_gem_lru still_in_lru; @@ -1423,8 +1426,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, * hit shrinker in response to trying to get backing pages * for this obj (ie. 
while it's lock is already held) */ - if (!dma_resv_trylock(obj->resv)) + if (!dma_resv_trylock(obj->resv)) { + *remaining += obj->size >> PAGE_SHIFT; goto tail; + } if (shrink(obj)) { freed += obj->size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 9b0d540ff4a8..4ea6507a77e5 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -623,11 +623,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct int ret; if (obj->import_attach) { - /* Drop the reference drm_gem_mmap_obj() acquired.*/ - drm_gem_object_put(obj); vma->vm_private_data = NULL; + ret = dma_buf_mmap(obj->dma_buf, vma, 0); + + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + if (!ret) + drm_gem_object_put(obj); - return dma_buf_mmap(obj->dma_buf, vma, 0); + return ret; } ret = drm_gem_shmem_get_pages(shmem); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index b41aaf2bb9f1..7900a4707d7c 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -329,7 +329,7 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host) for_each_available_child_of_node(host->dev->of_node, node) { /* skip nodes without reg property */ - if (!of_find_property(node, "reg", NULL)) + if (!of_property_present(node, "reg")) continue; of_mipi_dsi_device_add(host, node); } diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 5522d610c5cf..b1a38e6ce2f8 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, - }, { /* Lenovo Yoga Book X90F / X91F / X91L */ + }, { /* Lenovo Yoga Book X90F / X90L */ .matches = { - /* Non exact match to match all versions */ - DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Book X91F / X91L */ + .matches = { + /* Non exact match to match F + L versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* Lenovo Yoga Tablet 2 830F / 830L */ diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index f924b8b4ab6b..149cd4ff6a3b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -285,7 +285,7 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release); /** * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers - * @dev: dev to export the buffer from + * @dev: drm_device to import into * @file_priv: drm file-private structure * @prime_fd: fd id of the dma-buf which should be imported * @handle: pointer to storage for the handle of the imported buffer object @@ -925,7 +925,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, obj = dma_buf->priv; if (obj->dev == dev) { /* - * Importing dmabuf exported from out own gem increases + * Importing dmabuf exported from our own gem increases * refcount on gem itself instead of f_count of dmabuf. 
*/ drm_gem_object_get(obj); diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 2ff31717a3de..299fa2a19a90 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -844,10 +844,9 @@ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); /** - * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent - * vblank interval - * @dev: DRM device - * @pipe: index of CRTC whose vblank timestamp to retrieve + * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most + * recent vblank interval + * @crtc: CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers @@ -865,10 +864,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); * True if timestamp is considered to be very precise, false otherwise. */ static bool -drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, - ktime_t *tvblank, bool in_vblank_irq) +drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank, + bool in_vblank_irq) { - struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); bool ret = false; /* Define requested maximum error on timestamps (nanoseconds). */ @@ -876,8 +874,6 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, /* Query driver if possible and precision timestamping enabled. */ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) { - struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); - ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error, tvblank, in_vblank_irq); } @@ -891,6 +887,15 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, return ret; } +static bool +drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, + ktime_t *tvblank, bool in_vblank_irq) +{ + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + + return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq); +} + /** * drm_crtc_vblank_count - retrieve "cooked" vblank counter value * @crtc: which counter to retrieve @@ -980,6 +985,36 @@ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, } EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); +/** + * drm_crtc_next_vblank_start - calculate the time of the next vblank + * @crtc: the crtc for which to calculate next vblank time + * @vblanktime: pointer to time to receive the next vblank timestamp. 
+ * + * Calculate the expected time of the start of the next vblank period, + * based on time of previous vblank and frame duration + */ +int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime) +{ + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &crtc->dev->vblank[pipe]; + struct drm_display_mode *mode = &vblank->hwmode; + u64 vblank_start; + + if (!vblank->framedur_ns || !vblank->linedur_ns) + return -EINVAL; + + if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false)) + return -EINVAL; + + vblank_start = DIV_ROUND_DOWN_ULL( + (u64)vblank->framedur_ns * mode->crtc_vblank_start, + mode->crtc_vtotal); + *vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start)); + + return 0; +} +EXPORT_SYMBOL(drm_crtc_next_vblank_start); + static void send_vblank_event(struct drm_device *dev, struct drm_pending_vblank_event *e, u64 seq, ktime_t now) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 4049fa4273ab..0cb92d651ff1 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -59,6 +59,7 @@ config DRM_EXYNOS_DSI depends on DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON select DRM_MIPI_DSI select DRM_PANEL + select DRM_SAMSUNG_DSIM default n help This enables support for Exynos MIPI-DSI device. diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 06d6513ddaae..fc81f728e6ba 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1,1521 +1,56 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Samsung SoC MIPI DSI Master driver. + * Samsung MIPI DSIM glue for Exynos SoCs. * * Copyright (c) 2014 Samsung Electronics Co., Ltd * * Contacts: Tomasz Figa <t.figa@samsung.com> -*/ + */ -#include <linux/clk.h> -#include <linux/delay.h> #include <linux/component.h> -#include <linux/gpio/consumer.h> -#include <linux/irq.h> #include <linux/of_device.h> -#include <linux/of_graph.h> -#include <linux/phy/phy.h> -#include <linux/regulator/consumer.h> - -#include <asm/unaligned.h> -#include <video/mipi_display.h> -#include <video/videomode.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_bridge.h> -#include <drm/drm_mipi_dsi.h> -#include <drm/drm_panel.h> -#include <drm/drm_print.h> +#include <drm/bridge/samsung-dsim.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" -/* returns true iff both arguments logically differs */ -#define NEQV(a, b) (!(a) ^ !(b)) - -/* DSIM_STATUS */ -#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) -#define DSIM_STOP_STATE_CLK (1 << 8) -#define DSIM_TX_READY_HS_CLK (1 << 10) -#define DSIM_PLL_STABLE (1 << 31) - -/* DSIM_SWRST */ -#define DSIM_FUNCRST (1 << 16) -#define DSIM_SWRST (1 << 0) - -/* DSIM_TIMEOUT */ -#define DSIM_LPDR_TIMEOUT(x) ((x) << 0) -#define DSIM_BTA_TIMEOUT(x) ((x) << 16) - -/* DSIM_CLKCTRL */ -#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0) -#define DSIM_ESC_PRESCALER_MASK (0xffff << 0) -#define DSIM_LANE_ESC_CLK_EN_CLK (1 << 19) -#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20) -#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20) -#define DSIM_BYTE_CLKEN (1 << 24) -#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25) -#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25) -#define DSIM_PLL_BYPASS (1 << 27) -#define DSIM_ESC_CLKEN (1 << 28) -#define DSIM_TX_REQUEST_HSCLK (1 << 31) - -/* DSIM_CONFIG */ -#define DSIM_LANE_EN_CLK (1 << 0) -#define DSIM_LANE_EN(x) 
(((x) & 0xf) << 1) -#define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5) -#define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8) -#define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12) -#define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12) -#define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12) -#define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12) -#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12) -#define DSIM_SUB_VC (((x) & 0x3) << 16) -#define DSIM_MAIN_VC (((x) & 0x3) << 18) -#define DSIM_HSA_DISABLE_MODE (1 << 20) -#define DSIM_HBP_DISABLE_MODE (1 << 21) -#define DSIM_HFP_DISABLE_MODE (1 << 22) -/* - * The i.MX 8M Mini Applications Processor Reference Manual, - * Rev. 3, 11/2020 Page 4091 - * The i.MX 8M Nano Applications Processor Reference Manual, - * Rev. 2, 07/2022 Page 3058 - * The i.MX 8M Plus Applications Processor Reference Manual, - * Rev. 1, 06/2021 Page 5436 - * named this bit as 'HseDisableMode' but the bit definition - * is quite opposite like - * 0 = Disables transfer - * 1 = Enables transfer - * which clearly states that HSE is not a disable bit. - * - * This bit is named as per the manual even though it is not - * a disable bit however the driver logic for handling HSE - * is based on the MIPI_DSI_MODE_VIDEO_HSE flag itself. - */ -#define DSIM_HSE_DISABLE_MODE (1 << 23) -#define DSIM_AUTO_MODE (1 << 24) -#define DSIM_VIDEO_MODE (1 << 25) -#define DSIM_BURST_MODE (1 << 26) -#define DSIM_SYNC_INFORM (1 << 27) -#define DSIM_EOT_DISABLE (1 << 28) -#define DSIM_MFLUSH_VS (1 << 29) -/* This flag is valid only for exynos3250/3472/5260/5430 */ -#define DSIM_CLKLANE_STOP (1 << 30) - -/* DSIM_ESCMODE */ -#define DSIM_TX_TRIGGER_RST (1 << 4) -#define DSIM_TX_LPDT_LP (1 << 6) -#define DSIM_CMD_LPDT_LP (1 << 7) -#define DSIM_FORCE_BTA (1 << 16) -#define DSIM_FORCE_STOP_STATE (1 << 20) -#define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21) -#define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21) - -/* DSIM_MDRESOL */ -#define DSIM_MAIN_STAND_BY (1 << 31) -#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16) -#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0) - -/* DSIM_MVPORCH */ -#define DSIM_CMD_ALLOW(x) ((x) << 28) -#define DSIM_STABLE_VFP(x) ((x) << 16) -#define DSIM_MAIN_VBP(x) ((x) << 0) -#define DSIM_CMD_ALLOW_MASK (0xf << 28) -#define DSIM_STABLE_VFP_MASK (0x7ff << 16) -#define DSIM_MAIN_VBP_MASK (0x7ff << 0) - -/* DSIM_MHPORCH */ -#define DSIM_MAIN_HFP(x) ((x) << 16) -#define DSIM_MAIN_HBP(x) ((x) << 0) -#define DSIM_MAIN_HFP_MASK ((0xffff) << 16) -#define DSIM_MAIN_HBP_MASK ((0xffff) << 0) - -/* DSIM_MSYNC */ -#define DSIM_MAIN_VSA(x) ((x) << 22) -#define DSIM_MAIN_HSA(x) ((x) << 0) -#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22) -#define DSIM_MAIN_HSA_MASK ((0xffff) << 0) - -/* DSIM_SDRESOL */ -#define DSIM_SUB_STANDY(x) ((x) << 31) -#define DSIM_SUB_VRESOL(x) ((x) << 16) -#define DSIM_SUB_HRESOL(x) ((x) << 0) -#define DSIM_SUB_STANDY_MASK ((0x1) << 31) -#define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16) -#define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0) - -/* DSIM_INTSRC */ -#define DSIM_INT_PLL_STABLE (1 << 31) -#define DSIM_INT_SW_RST_RELEASE (1 << 30) -#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29) -#define DSIM_INT_SFR_HDR_FIFO_EMPTY (1 << 28) -#define DSIM_INT_BTA (1 << 25) -#define DSIM_INT_FRAME_DONE (1 << 24) -#define DSIM_INT_RX_TIMEOUT (1 << 21) -#define DSIM_INT_BTA_TIMEOUT (1 << 20) -#define DSIM_INT_RX_DONE (1 << 18) -#define DSIM_INT_RX_TE (1 << 17) -#define DSIM_INT_RX_ACK (1 << 16) -#define DSIM_INT_RX_ECC_ERR (1 << 15) -#define DSIM_INT_RX_CRC_ERR (1 << 14) - -/* 
DSIM_FIFOCTRL */ -#define DSIM_RX_DATA_FULL (1 << 25) -#define DSIM_RX_DATA_EMPTY (1 << 24) -#define DSIM_SFR_HEADER_FULL (1 << 23) -#define DSIM_SFR_HEADER_EMPTY (1 << 22) -#define DSIM_SFR_PAYLOAD_FULL (1 << 21) -#define DSIM_SFR_PAYLOAD_EMPTY (1 << 20) -#define DSIM_I80_HEADER_FULL (1 << 19) -#define DSIM_I80_HEADER_EMPTY (1 << 18) -#define DSIM_I80_PAYLOAD_FULL (1 << 17) -#define DSIM_I80_PAYLOAD_EMPTY (1 << 16) -#define DSIM_SD_HEADER_FULL (1 << 15) -#define DSIM_SD_HEADER_EMPTY (1 << 14) -#define DSIM_SD_PAYLOAD_FULL (1 << 13) -#define DSIM_SD_PAYLOAD_EMPTY (1 << 12) -#define DSIM_MD_HEADER_FULL (1 << 11) -#define DSIM_MD_HEADER_EMPTY (1 << 10) -#define DSIM_MD_PAYLOAD_FULL (1 << 9) -#define DSIM_MD_PAYLOAD_EMPTY (1 << 8) -#define DSIM_RX_FIFO (1 << 4) -#define DSIM_SFR_FIFO (1 << 3) -#define DSIM_I80_FIFO (1 << 2) -#define DSIM_SD_FIFO (1 << 1) -#define DSIM_MD_FIFO (1 << 0) - -/* DSIM_PHYACCHR */ -#define DSIM_AFC_EN (1 << 14) -#define DSIM_AFC_CTL(x) (((x) & 0x7) << 5) - -/* DSIM_PLLCTRL */ -#define DSIM_FREQ_BAND(x) ((x) << 24) -#define DSIM_PLL_EN (1 << 23) -#define DSIM_PLL_P(x) ((x) << 13) -#define DSIM_PLL_M(x) ((x) << 4) -#define DSIM_PLL_S(x) ((x) << 1) - -/* DSIM_PHYCTRL */ -#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0) -#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP (1 << 30) -#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP (1 << 14) - -/* DSIM_PHYTIMING */ -#define DSIM_PHYTIMING_LPX(x) ((x) << 8) -#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0) - -/* DSIM_PHYTIMING1 */ -#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24) -#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16) -#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8) -#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0) - -/* DSIM_PHYTIMING2 */ -#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16) -#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8) -#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0) - -#define DSI_MAX_BUS_WIDTH 4 -#define DSI_NUM_VIRTUAL_CHANNELS 4 -#define DSI_TX_FIFO_SIZE 2048 -#define DSI_RX_FIFO_SIZE 256 -#define DSI_XFER_TIMEOUT_MS 100 -#define DSI_RX_FIFO_EMPTY 0x30800002 - -#define OLD_SCLK_MIPI_CLK_NAME "pll_clk" - -static const char *const clk_names[5] = { "bus_clk", "sclk_mipi", - "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0", - "sclk_rgb_vclk_to_dsim0" }; - -enum exynos_dsi_transfer_type { - EXYNOS_DSI_TX, - EXYNOS_DSI_RX, -}; - -struct exynos_dsi_transfer { - struct list_head list; - struct completion completed; - int result; - struct mipi_dsi_packet packet; - u16 flags; - u16 tx_done; - - u8 *rx_payload; - u16 rx_len; - u16 rx_done; -}; - -#define DSIM_STATE_ENABLED BIT(0) -#define DSIM_STATE_INITIALIZED BIT(1) -#define DSIM_STATE_CMD_LPM BIT(2) -#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3) - -struct exynos_dsi_driver_data { - const unsigned int *reg_ofs; - unsigned int plltmr_reg; - unsigned int has_freqband:1; - unsigned int has_clklane_stop:1; - unsigned int num_clks; - unsigned int max_freq; - unsigned int wait_for_reset; - unsigned int num_bits_resol; - const unsigned int *reg_values; -}; - struct exynos_dsi { struct drm_encoder encoder; - struct mipi_dsi_host dsi_host; - struct drm_bridge bridge; - struct drm_bridge *out_bridge; - struct device *dev; - struct drm_display_mode mode; - - void __iomem *reg_base; - struct phy *phy; - struct clk **clks; - struct regulator_bulk_data supplies[2]; - int irq; - struct gpio_desc *te_gpio; - - u32 pll_clk_rate; - u32 burst_clk_rate; - u32 esc_clk_rate; - u32 lanes; - u32 mode_flags; - u32 format; - - int state; - struct drm_property *brightness; - struct 
completion completed; - - spinlock_t transfer_lock; /* protects transfer_list */ - struct list_head transfer_list; - - const struct exynos_dsi_driver_data *driver_data; -}; - -#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) - -static inline struct exynos_dsi *bridge_to_dsi(struct drm_bridge *b) -{ - return container_of(b, struct exynos_dsi, bridge); -} - -enum reg_idx { - DSIM_STATUS_REG, /* Status register */ - DSIM_SWRST_REG, /* Software reset register */ - DSIM_CLKCTRL_REG, /* Clock control register */ - DSIM_TIMEOUT_REG, /* Time out register */ - DSIM_CONFIG_REG, /* Configuration register */ - DSIM_ESCMODE_REG, /* Escape mode register */ - DSIM_MDRESOL_REG, - DSIM_MVPORCH_REG, /* Main display Vporch register */ - DSIM_MHPORCH_REG, /* Main display Hporch register */ - DSIM_MSYNC_REG, /* Main display sync area register */ - DSIM_INTSRC_REG, /* Interrupt source register */ - DSIM_INTMSK_REG, /* Interrupt mask register */ - DSIM_PKTHDR_REG, /* Packet Header FIFO register */ - DSIM_PAYLOAD_REG, /* Payload FIFO register */ - DSIM_RXFIFO_REG, /* Read FIFO register */ - DSIM_FIFOCTRL_REG, /* FIFO status and control register */ - DSIM_PLLCTRL_REG, /* PLL control register */ - DSIM_PHYCTRL_REG, - DSIM_PHYTIMING_REG, - DSIM_PHYTIMING1_REG, - DSIM_PHYTIMING2_REG, - NUM_REGS -}; - -static inline void exynos_dsi_write(struct exynos_dsi *dsi, enum reg_idx idx, - u32 val) -{ - - writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]); -} - -static inline u32 exynos_dsi_read(struct exynos_dsi *dsi, enum reg_idx idx) -{ - return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]); -} - -static const unsigned int exynos_reg_ofs[] = { - [DSIM_STATUS_REG] = 0x00, - [DSIM_SWRST_REG] = 0x04, - [DSIM_CLKCTRL_REG] = 0x08, - [DSIM_TIMEOUT_REG] = 0x0c, - [DSIM_CONFIG_REG] = 0x10, - [DSIM_ESCMODE_REG] = 0x14, - [DSIM_MDRESOL_REG] = 0x18, - [DSIM_MVPORCH_REG] = 0x1c, - [DSIM_MHPORCH_REG] = 0x20, - [DSIM_MSYNC_REG] = 0x24, - [DSIM_INTSRC_REG] = 0x2c, - [DSIM_INTMSK_REG] = 0x30, - [DSIM_PKTHDR_REG] = 0x34, - [DSIM_PAYLOAD_REG] = 0x38, - [DSIM_RXFIFO_REG] = 0x3c, - [DSIM_FIFOCTRL_REG] = 0x44, - [DSIM_PLLCTRL_REG] = 0x4c, - [DSIM_PHYCTRL_REG] = 0x5c, - [DSIM_PHYTIMING_REG] = 0x64, - [DSIM_PHYTIMING1_REG] = 0x68, - [DSIM_PHYTIMING2_REG] = 0x6c, -}; - -static const unsigned int exynos5433_reg_ofs[] = { - [DSIM_STATUS_REG] = 0x04, - [DSIM_SWRST_REG] = 0x0C, - [DSIM_CLKCTRL_REG] = 0x10, - [DSIM_TIMEOUT_REG] = 0x14, - [DSIM_CONFIG_REG] = 0x18, - [DSIM_ESCMODE_REG] = 0x1C, - [DSIM_MDRESOL_REG] = 0x20, - [DSIM_MVPORCH_REG] = 0x24, - [DSIM_MHPORCH_REG] = 0x28, - [DSIM_MSYNC_REG] = 0x2C, - [DSIM_INTSRC_REG] = 0x34, - [DSIM_INTMSK_REG] = 0x38, - [DSIM_PKTHDR_REG] = 0x3C, - [DSIM_PAYLOAD_REG] = 0x40, - [DSIM_RXFIFO_REG] = 0x44, - [DSIM_FIFOCTRL_REG] = 0x4C, - [DSIM_PLLCTRL_REG] = 0x94, - [DSIM_PHYCTRL_REG] = 0xA4, - [DSIM_PHYTIMING_REG] = 0xB4, - [DSIM_PHYTIMING1_REG] = 0xB8, - [DSIM_PHYTIMING2_REG] = 0xBC, -}; - -enum reg_value_idx { - RESET_TYPE, - PLL_TIMER, - STOP_STATE_CNT, - PHYCTRL_ULPS_EXIT, - PHYCTRL_VREG_LP, - PHYCTRL_SLEW_UP, - PHYTIMING_LPX, - PHYTIMING_HS_EXIT, - PHYTIMING_CLK_PREPARE, - PHYTIMING_CLK_ZERO, - PHYTIMING_CLK_POST, - PHYTIMING_CLK_TRAIL, - PHYTIMING_HS_PREPARE, - PHYTIMING_HS_ZERO, - PHYTIMING_HS_TRAIL -}; - -static const unsigned int reg_values[] = { - [RESET_TYPE] = DSIM_SWRST, - [PLL_TIMER] = 500, - [STOP_STATE_CNT] = 0xf, - [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af), - [PHYCTRL_VREG_LP] = 0, - [PHYCTRL_SLEW_UP] = 0, - [PHYTIMING_LPX] = 
DSIM_PHYTIMING_LPX(0x06), - [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b), - [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07), - [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27), - [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d), - [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08), - [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09), - [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d), - [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), -}; - -static const unsigned int exynos5422_reg_values[] = { - [RESET_TYPE] = DSIM_SWRST, - [PLL_TIMER] = 500, - [STOP_STATE_CNT] = 0xf, - [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf), - [PHYCTRL_VREG_LP] = 0, - [PHYCTRL_SLEW_UP] = 0, - [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08), - [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d), - [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), - [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30), - [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), - [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a), - [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c), - [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11), - [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d), -}; - -static const unsigned int exynos5433_reg_values[] = { - [RESET_TYPE] = DSIM_FUNCRST, - [PLL_TIMER] = 22200, - [STOP_STATE_CNT] = 0xa, - [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190), - [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP, - [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP, - [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07), - [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c), - [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), - [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d), - [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), - [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09), - [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b), - [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10), - [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c), -}; - -static const struct exynos_dsi_driver_data exynos3_dsi_driver_data = { - .reg_ofs = exynos_reg_ofs, - .plltmr_reg = 0x50, - .has_freqband = 1, - .has_clklane_stop = 1, - .num_clks = 2, - .max_freq = 1000, - .wait_for_reset = 1, - .num_bits_resol = 11, - .reg_values = reg_values, }; -static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = { - .reg_ofs = exynos_reg_ofs, - .plltmr_reg = 0x50, - .has_freqband = 1, - .has_clklane_stop = 1, - .num_clks = 2, - .max_freq = 1000, - .wait_for_reset = 1, - .num_bits_resol = 11, - .reg_values = reg_values, -}; - -static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { - .reg_ofs = exynos_reg_ofs, - .plltmr_reg = 0x58, - .num_clks = 2, - .max_freq = 1000, - .wait_for_reset = 1, - .num_bits_resol = 11, - .reg_values = reg_values, -}; - -static const struct exynos_dsi_driver_data exynos5433_dsi_driver_data = { - .reg_ofs = exynos5433_reg_ofs, - .plltmr_reg = 0xa0, - .has_clklane_stop = 1, - .num_clks = 5, - .max_freq = 1500, - .wait_for_reset = 0, - .num_bits_resol = 12, - .reg_values = exynos5433_reg_values, -}; - -static const struct exynos_dsi_driver_data exynos5422_dsi_driver_data = { - .reg_ofs = exynos5433_reg_ofs, - .plltmr_reg = 0xa0, - .has_clklane_stop = 1, - .num_clks = 2, - .max_freq = 1500, - .wait_for_reset = 1, - .num_bits_resol = 12, - .reg_values = exynos5422_reg_values, -}; - -static const struct of_device_id exynos_dsi_of_match[] = { - { .compatible = "samsung,exynos3250-mipi-dsi", - .data 
= &exynos3_dsi_driver_data }, - { .compatible = "samsung,exynos4210-mipi-dsi", - .data = &exynos4_dsi_driver_data }, - { .compatible = "samsung,exynos5410-mipi-dsi", - .data = &exynos5_dsi_driver_data }, - { .compatible = "samsung,exynos5422-mipi-dsi", - .data = &exynos5422_dsi_driver_data }, - { .compatible = "samsung,exynos5433-mipi-dsi", - .data = &exynos5433_dsi_driver_data }, - { } -}; - -static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi) -{ - if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) - return; - - dev_err(dsi->dev, "timeout waiting for reset\n"); -} - -static void exynos_dsi_reset(struct exynos_dsi *dsi) -{ - u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE]; - - reinit_completion(&dsi->completed); - exynos_dsi_write(dsi, DSIM_SWRST_REG, reset_val); -} - -#ifndef MHZ -#define MHZ (1000*1000) -#endif - -static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, - unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s) -{ - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - unsigned long best_freq = 0; - u32 min_delta = 0xffffffff; - u8 p_min, p_max; - u8 _p, best_p; - u16 _m, best_m; - u8 _s, best_s; - - p_min = DIV_ROUND_UP(fin, (12 * MHZ)); - p_max = fin / (6 * MHZ); - - for (_p = p_min; _p <= p_max; ++_p) { - for (_s = 0; _s <= 5; ++_s) { - u64 tmp; - u32 delta; - - tmp = (u64)fout * (_p << _s); - do_div(tmp, fin); - _m = tmp; - if (_m < 41 || _m > 125) - continue; - - tmp = (u64)_m * fin; - do_div(tmp, _p); - if (tmp < 500 * MHZ || - tmp > driver_data->max_freq * MHZ) - continue; - - tmp = (u64)_m * fin; - do_div(tmp, _p << _s); - - delta = abs(fout - tmp); - if (delta < min_delta) { - best_p = _p; - best_m = _m; - best_s = _s; - min_delta = delta; - best_freq = tmp; - } - } - } - - if (best_freq) { - *p = best_p; - *m = best_m; - *s = best_s; - } - - return best_freq; -} - -static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi, - unsigned long freq) -{ - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - unsigned long fin, fout; - int timeout; - u8 p, s; - u16 m; - u32 reg; - - fin = dsi->pll_clk_rate; - fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s); - if (!fout) { - dev_err(dsi->dev, - "failed to find PLL PMS for requested frequency\n"); - return 0; - } - dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); - - writel(driver_data->reg_values[PLL_TIMER], - dsi->reg_base + driver_data->plltmr_reg); - - reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s); - - if (driver_data->has_freqband) { - static const unsigned long freq_bands[] = { - 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, - 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, - 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ, - 770 * MHZ, 870 * MHZ, 950 * MHZ, - }; - int band; - - for (band = 0; band < ARRAY_SIZE(freq_bands); ++band) - if (fout < freq_bands[band]) - break; - - dev_dbg(dsi->dev, "band %d\n", band); - - reg |= DSIM_FREQ_BAND(band); - } - - exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg); - - timeout = 1000; - do { - if (timeout-- == 0) { - dev_err(dsi->dev, "PLL failed to stabilize\n"); - return 0; - } - reg = exynos_dsi_read(dsi, DSIM_STATUS_REG); - } while ((reg & DSIM_PLL_STABLE) == 0); - - return fout; -} - -static int exynos_dsi_enable_clock(struct exynos_dsi *dsi) -{ - unsigned long hs_clk, byte_clk, esc_clk; - unsigned long esc_div; - u32 reg; - - hs_clk = exynos_dsi_set_pll(dsi, dsi->burst_clk_rate); - if (!hs_clk) { - dev_err(dsi->dev, "failed to configure 
DSI PLL\n"); - return -EFAULT; - } - - byte_clk = hs_clk / 8; - esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate); - esc_clk = byte_clk / esc_div; - - if (esc_clk > 20 * MHZ) { - ++esc_div; - esc_clk = byte_clk / esc_div; - } - - dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n", - hs_clk, byte_clk, esc_clk); - - reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG); - reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK - | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS - | DSIM_BYTE_CLK_SRC_MASK); - reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN - | DSIM_ESC_PRESCALER(esc_div) - | DSIM_LANE_ESC_CLK_EN_CLK - | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1) - | DSIM_BYTE_CLK_SRC(0) - | DSIM_TX_REQUEST_HSCLK; - exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg); - - return 0; -} - -static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) -{ - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - const unsigned int *reg_values = driver_data->reg_values; - u32 reg; - - if (driver_data->has_freqband) - return; - - /* B D-PHY: D-PHY Master & Slave Analog Block control */ - reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] | - reg_values[PHYCTRL_SLEW_UP]; - exynos_dsi_write(dsi, DSIM_PHYCTRL_REG, reg); - - /* - * T LPX: Transmitted length of any Low-Power state period - * T HS-EXIT: Time that the transmitter drives LP-11 following a HS - * burst - */ - reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT]; - exynos_dsi_write(dsi, DSIM_PHYTIMING_REG, reg); - - /* - * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00 - * Line state immediately before the HS-0 Line state starting the - * HS transmission - * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to - * transmitting the Clock. - * T CLK_POST: Time that the transmitter continues to send HS clock - * after the last associated Data Lane has transitioned to LP Mode - * Interval is defined as the period from the end of T HS-TRAIL to - * the beginning of T CLK-TRAIL - * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after - * the last payload clock bit of a HS transmission burst - */ - reg = reg_values[PHYTIMING_CLK_PREPARE] | - reg_values[PHYTIMING_CLK_ZERO] | - reg_values[PHYTIMING_CLK_POST] | - reg_values[PHYTIMING_CLK_TRAIL]; - - exynos_dsi_write(dsi, DSIM_PHYTIMING1_REG, reg); - - /* - * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00 - * Line state immediately before the HS-0 Line state starting the - * HS transmission - * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to - * transmitting the Sync sequence. 
- * T HS-TRAIL: Time that the transmitter drives the flipped differential - * state after last payload data bit of a HS transmission burst - */ - reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] | - reg_values[PHYTIMING_HS_TRAIL]; - exynos_dsi_write(dsi, DSIM_PHYTIMING2_REG, reg); -} - -static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) -{ - u32 reg; - - reg = exynos_dsi_read(dsi, DSIM_CLKCTRL_REG); - reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK - | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN); - exynos_dsi_write(dsi, DSIM_CLKCTRL_REG, reg); - - reg = exynos_dsi_read(dsi, DSIM_PLLCTRL_REG); - reg &= ~DSIM_PLL_EN; - exynos_dsi_write(dsi, DSIM_PLLCTRL_REG, reg); -} - -static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane) -{ - u32 reg = exynos_dsi_read(dsi, DSIM_CONFIG_REG); - reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK | - DSIM_LANE_EN(lane)); - exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg); -} - -static int exynos_dsi_init_link(struct exynos_dsi *dsi) -{ - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - int timeout; - u32 reg; - u32 lanes_mask; - - /* Initialize FIFO pointers */ - reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG); - reg &= ~0x1f; - exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg); - - usleep_range(9000, 11000); - - reg |= 0x1f; - exynos_dsi_write(dsi, DSIM_FIFOCTRL_REG, reg); - usleep_range(9000, 11000); - - /* DSI configuration */ - reg = 0; - - /* - * The first bit of mode_flags specifies display configuration. - * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video - * mode, otherwise it will support command mode. - */ - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { - reg |= DSIM_VIDEO_MODE; - - /* - * The user manual describes that following bits are ignored in - * command mode. - */ - if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) - reg |= DSIM_MFLUSH_VS; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - reg |= DSIM_SYNC_INFORM; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) - reg |= DSIM_BURST_MODE; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT) - reg |= DSIM_AUTO_MODE; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE) - reg |= DSIM_HSE_DISABLE_MODE; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP) - reg |= DSIM_HFP_DISABLE_MODE; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP) - reg |= DSIM_HBP_DISABLE_MODE; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA) - reg |= DSIM_HSA_DISABLE_MODE; - } - - if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) - reg |= DSIM_EOT_DISABLE; - - switch (dsi->format) { - case MIPI_DSI_FMT_RGB888: - reg |= DSIM_MAIN_PIX_FORMAT_RGB888; - break; - case MIPI_DSI_FMT_RGB666: - reg |= DSIM_MAIN_PIX_FORMAT_RGB666; - break; - case MIPI_DSI_FMT_RGB666_PACKED: - reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P; - break; - case MIPI_DSI_FMT_RGB565: - reg |= DSIM_MAIN_PIX_FORMAT_RGB565; - break; - default: - dev_err(dsi->dev, "invalid pixel format\n"); - return -EINVAL; - } - - /* - * Use non-continuous clock mode if the periparal wants and - * host controller supports - * - * In non-continous clock mode, host controller will turn off - * the HS clock between high-speed transmissions to reduce - * power consumption. 
- */ - if (driver_data->has_clklane_stop && - dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { - reg |= DSIM_CLKLANE_STOP; - } - exynos_dsi_write(dsi, DSIM_CONFIG_REG, reg); - - lanes_mask = BIT(dsi->lanes) - 1; - exynos_dsi_enable_lane(dsi, lanes_mask); - - /* Check clock and data lane state are stop state */ - timeout = 100; - do { - if (timeout-- == 0) { - dev_err(dsi->dev, "waiting for bus lanes timed out\n"); - return -EFAULT; - } - - reg = exynos_dsi_read(dsi, DSIM_STATUS_REG); - if ((reg & DSIM_STOP_STATE_DAT(lanes_mask)) - != DSIM_STOP_STATE_DAT(lanes_mask)) - continue; - } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK))); - - reg = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); - reg &= ~DSIM_STOP_STATE_CNT_MASK; - reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]); - exynos_dsi_write(dsi, DSIM_ESCMODE_REG, reg); - - reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); - exynos_dsi_write(dsi, DSIM_TIMEOUT_REG, reg); - - return 0; -} - -static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) -{ - struct drm_display_mode *m = &dsi->mode; - unsigned int num_bits_resol = dsi->driver_data->num_bits_resol; - u32 reg; - - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { - reg = DSIM_CMD_ALLOW(0xf) - | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay) - | DSIM_MAIN_VBP(m->vtotal - m->vsync_end); - exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg); - - reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay) - | DSIM_MAIN_HBP(m->htotal - m->hsync_end); - exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg); - - reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) - | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start); - exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg); - } - reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | - DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol); - - exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); - - dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay); -} - -static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) -{ - u32 reg; - - reg = exynos_dsi_read(dsi, DSIM_MDRESOL_REG); - if (enable) - reg |= DSIM_MAIN_STAND_BY; - else - reg &= ~DSIM_MAIN_STAND_BY; - exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); -} - -static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) -{ - int timeout = 2000; - - do { - u32 reg = exynos_dsi_read(dsi, DSIM_FIFOCTRL_REG); - - if (!(reg & DSIM_SFR_HEADER_FULL)) - return 0; - - if (!cond_resched()) - usleep_range(950, 1050); - } while (--timeout); - - return -ETIMEDOUT; -} - -static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm) -{ - u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); - - if (lpm) - v |= DSIM_CMD_LPDT_LP; - else - v &= ~DSIM_CMD_LPDT_LP; - - exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v); -} - -static void exynos_dsi_force_bta(struct exynos_dsi *dsi) -{ - u32 v = exynos_dsi_read(dsi, DSIM_ESCMODE_REG); - v |= DSIM_FORCE_BTA; - exynos_dsi_write(dsi, DSIM_ESCMODE_REG, v); -} - -static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, - struct exynos_dsi_transfer *xfer) -{ - struct device *dev = dsi->dev; - struct mipi_dsi_packet *pkt = &xfer->packet; - const u8 *payload = pkt->payload + xfer->tx_done; - u16 length = pkt->payload_length - xfer->tx_done; - bool first = !xfer->tx_done; - u32 reg; - - dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", - xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); - - if (length > DSI_TX_FIFO_SIZE) - length = DSI_TX_FIFO_SIZE; - - xfer->tx_done += length; - - /* Send payload */ - while (length >= 4) { - reg = 
get_unaligned_le32(payload); - exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); - payload += 4; - length -= 4; - } - - reg = 0; - switch (length) { - case 3: - reg |= payload[2] << 16; - fallthrough; - case 2: - reg |= payload[1] << 8; - fallthrough; - case 1: - reg |= payload[0]; - exynos_dsi_write(dsi, DSIM_PAYLOAD_REG, reg); - break; - } - - /* Send packet header */ - if (!first) - return; - - reg = get_unaligned_le32(pkt->header); - if (exynos_dsi_wait_for_hdr_fifo(dsi)) { - dev_err(dev, "waiting for header FIFO timed out\n"); - return; - } - - if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM, - dsi->state & DSIM_STATE_CMD_LPM)) { - exynos_dsi_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM); - dsi->state ^= DSIM_STATE_CMD_LPM; - } - - exynos_dsi_write(dsi, DSIM_PKTHDR_REG, reg); - - if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) - exynos_dsi_force_bta(dsi); -} - -static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi, - struct exynos_dsi_transfer *xfer) -{ - u8 *payload = xfer->rx_payload + xfer->rx_done; - bool first = !xfer->rx_done; - struct device *dev = dsi->dev; - u16 length; - u32 reg; - - if (first) { - reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); - - switch (reg & 0x3f) { - case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: - case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: - if (xfer->rx_len >= 2) { - payload[1] = reg >> 16; - ++xfer->rx_done; - } - fallthrough; - case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: - case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: - payload[0] = reg >> 8; - ++xfer->rx_done; - xfer->rx_len = xfer->rx_done; - xfer->result = 0; - goto clear_fifo; - case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: - dev_err(dev, "DSI Error Report: 0x%04x\n", - (reg >> 8) & 0xffff); - xfer->result = 0; - goto clear_fifo; - } - - length = (reg >> 8) & 0xffff; - if (length > xfer->rx_len) { - dev_err(dev, - "response too long (%u > %u bytes), stripping\n", - xfer->rx_len, length); - length = xfer->rx_len; - } else if (length < xfer->rx_len) - xfer->rx_len = length; - } - - length = xfer->rx_len - xfer->rx_done; - xfer->rx_done += length; - - /* Receive payload */ - while (length >= 4) { - reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); - payload[0] = (reg >> 0) & 0xff; - payload[1] = (reg >> 8) & 0xff; - payload[2] = (reg >> 16) & 0xff; - payload[3] = (reg >> 24) & 0xff; - payload += 4; - length -= 4; - } - - if (length) { - reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); - switch (length) { - case 3: - payload[2] = (reg >> 16) & 0xff; - fallthrough; - case 2: - payload[1] = (reg >> 8) & 0xff; - fallthrough; - case 1: - payload[0] = reg & 0xff; - } - } - - if (xfer->rx_done == xfer->rx_len) - xfer->result = 0; - -clear_fifo: - length = DSI_RX_FIFO_SIZE / 4; - do { - reg = exynos_dsi_read(dsi, DSIM_RXFIFO_REG); - if (reg == DSI_RX_FIFO_EMPTY) - break; - } while (--length); -} - -static void exynos_dsi_transfer_start(struct exynos_dsi *dsi) -{ - unsigned long flags; - struct exynos_dsi_transfer *xfer; - bool start = false; - -again: - spin_lock_irqsave(&dsi->transfer_lock, flags); - - if (list_empty(&dsi->transfer_list)) { - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - return; - } - - xfer = list_first_entry(&dsi->transfer_list, - struct exynos_dsi_transfer, list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - if (xfer->packet.payload_length && - xfer->tx_done == xfer->packet.payload_length) - /* waiting for RX */ - return; - - exynos_dsi_send_to_fifo(dsi, xfer); - - if (xfer->packet.payload_length || xfer->rx_len) - return; - - xfer->result = 0; - 
complete(&xfer->completed); - - spin_lock_irqsave(&dsi->transfer_lock, flags); - - list_del_init(&xfer->list); - start = !list_empty(&dsi->transfer_list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - if (start) - goto again; -} - -static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi) -{ - struct exynos_dsi_transfer *xfer; - unsigned long flags; - bool start = true; - - spin_lock_irqsave(&dsi->transfer_lock, flags); - - if (list_empty(&dsi->transfer_list)) { - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - return false; - } - - xfer = list_first_entry(&dsi->transfer_list, - struct exynos_dsi_transfer, list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - dev_dbg(dsi->dev, - "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", - xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, - xfer->rx_done); - - if (xfer->tx_done != xfer->packet.payload_length) - return true; - - if (xfer->rx_done != xfer->rx_len) - exynos_dsi_read_from_fifo(dsi, xfer); - - if (xfer->rx_done != xfer->rx_len) - return true; - - spin_lock_irqsave(&dsi->transfer_lock, flags); - - list_del_init(&xfer->list); - start = !list_empty(&dsi->transfer_list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - if (!xfer->rx_len) - xfer->result = 0; - complete(&xfer->completed); - - return start; -} - -static void exynos_dsi_remove_transfer(struct exynos_dsi *dsi, - struct exynos_dsi_transfer *xfer) +static irqreturn_t exynos_dsi_te_irq_handler(struct samsung_dsim *dsim) { - unsigned long flags; - bool start; - - spin_lock_irqsave(&dsi->transfer_lock, flags); - - if (!list_empty(&dsi->transfer_list) && - xfer == list_first_entry(&dsi->transfer_list, - struct exynos_dsi_transfer, list)) { - list_del_init(&xfer->list); - start = !list_empty(&dsi->transfer_list); - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - if (start) - exynos_dsi_transfer_start(dsi); - return; - } - - list_del_init(&xfer->list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); -} - -static int exynos_dsi_transfer(struct exynos_dsi *dsi, - struct exynos_dsi_transfer *xfer) -{ - unsigned long flags; - bool stopped; - - xfer->tx_done = 0; - xfer->rx_done = 0; - xfer->result = -ETIMEDOUT; - init_completion(&xfer->completed); - - spin_lock_irqsave(&dsi->transfer_lock, flags); - - stopped = list_empty(&dsi->transfer_list); - list_add_tail(&xfer->list, &dsi->transfer_list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - if (stopped) - exynos_dsi_transfer_start(dsi); - - wait_for_completion_timeout(&xfer->completed, - msecs_to_jiffies(DSI_XFER_TIMEOUT_MS)); - if (xfer->result == -ETIMEDOUT) { - struct mipi_dsi_packet *pkt = &xfer->packet; - exynos_dsi_remove_transfer(dsi, xfer); - dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header, - (int)pkt->payload_length, pkt->payload); - return -ETIMEDOUT; - } - - /* Also covers hardware timeout condition */ - return xfer->result; -} - -static irqreturn_t exynos_dsi_irq(int irq, void *dev_id) -{ - struct exynos_dsi *dsi = dev_id; - u32 status; - - status = exynos_dsi_read(dsi, DSIM_INTSRC_REG); - if (!status) { - static unsigned long int j; - if (printk_timed_ratelimit(&j, 500)) - dev_warn(dsi->dev, "spurious interrupt\n"); - return IRQ_HANDLED; - } - exynos_dsi_write(dsi, DSIM_INTSRC_REG, status); - - if (status & DSIM_INT_SW_RST_RELEASE) { - u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY | - DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_RX_ECC_ERR | - DSIM_INT_SW_RST_RELEASE); - exynos_dsi_write(dsi, DSIM_INTMSK_REG, 
mask); - complete(&dsi->completed); - return IRQ_HANDLED; - } - - if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY | - DSIM_INT_PLL_STABLE))) - return IRQ_HANDLED; - - if (exynos_dsi_transfer_finish(dsi)) - exynos_dsi_transfer_start(dsi); - - return IRQ_HANDLED; -} - -static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id) -{ - struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; + struct exynos_dsi *dsi = dsim->priv; struct drm_encoder *encoder = &dsi->encoder; - if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE) + if (dsim->state & DSIM_STATE_VIDOUT_AVAILABLE) exynos_drm_crtc_te_handler(encoder->crtc); return IRQ_HANDLED; } -static void exynos_dsi_enable_irq(struct exynos_dsi *dsi) -{ - enable_irq(dsi->irq); - - if (dsi->te_gpio) - enable_irq(gpiod_to_irq(dsi->te_gpio)); -} - -static void exynos_dsi_disable_irq(struct exynos_dsi *dsi) -{ - if (dsi->te_gpio) - disable_irq(gpiod_to_irq(dsi->te_gpio)); - - disable_irq(dsi->irq); -} - -static int exynos_dsi_init(struct exynos_dsi *dsi) -{ - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - - exynos_dsi_reset(dsi); - exynos_dsi_enable_irq(dsi); - - if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST) - exynos_dsi_enable_lane(dsi, BIT(dsi->lanes) - 1); - - exynos_dsi_enable_clock(dsi); - if (driver_data->wait_for_reset) - exynos_dsi_wait_for_reset(dsi); - exynos_dsi_set_phy_ctrl(dsi); - exynos_dsi_init_link(dsi); - - return 0; -} - -static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi, - struct device *panel) -{ - int ret; - int te_gpio_irq; - - dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN); - if (!dsi->te_gpio) { - return 0; - } else if (IS_ERR(dsi->te_gpio)) { - dev_err(dsi->dev, "gpio request failed with %ld\n", - PTR_ERR(dsi->te_gpio)); - return PTR_ERR(dsi->te_gpio); - } - - te_gpio_irq = gpiod_to_irq(dsi->te_gpio); - - ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL, - IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi); - if (ret) { - dev_err(dsi->dev, "request interrupt failed with %d\n", ret); - gpiod_put(dsi->te_gpio); - return ret; - } - - return 0; -} - -static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi) -{ - if (dsi->te_gpio) { - free_irq(gpiod_to_irq(dsi->te_gpio), dsi); - gpiod_put(dsi->te_gpio); - } -} - -static void exynos_dsi_atomic_pre_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - int ret; - - if (dsi->state & DSIM_STATE_ENABLED) - return; - - ret = pm_runtime_resume_and_get(dsi->dev); - if (ret < 0) { - dev_err(dsi->dev, "failed to enable DSI device.\n"); - return; - } - - dsi->state |= DSIM_STATE_ENABLED; -} - -static void exynos_dsi_atomic_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - - exynos_dsi_set_display_mode(dsi); - exynos_dsi_set_display_enable(dsi, true); - - dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; - - return; -} - -static void exynos_dsi_atomic_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - - if (!(dsi->state & DSIM_STATE_ENABLED)) - return; - - dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; -} - -static void exynos_dsi_atomic_post_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - - exynos_dsi_set_display_enable(dsi, false); - - dsi->state &= ~DSIM_STATE_ENABLED; - 
pm_runtime_put_sync(dsi->dev); -} - -static void exynos_dsi_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - - drm_mode_copy(&dsi->mode, adjusted_mode); -} - -static int exynos_dsi_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct exynos_dsi *dsi = bridge_to_dsi(bridge); - - return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge, - flags); -} - -static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = { - .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_pre_enable = exynos_dsi_atomic_pre_enable, - .atomic_enable = exynos_dsi_atomic_enable, - .atomic_disable = exynos_dsi_atomic_disable, - .atomic_post_disable = exynos_dsi_atomic_post_disable, - .mode_set = exynos_dsi_mode_set, - .attach = exynos_dsi_attach, -}; - -MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); - -static int exynos_dsi_host_attach(struct mipi_dsi_host *host, +static int exynos_dsi_host_attach(struct samsung_dsim *dsim, struct mipi_dsi_device *device) { - struct exynos_dsi *dsi = host_to_dsi(host); - struct device *dev = dsi->dev; + struct exynos_dsi *dsi = dsim->priv; struct drm_encoder *encoder = &dsi->encoder; struct drm_device *drm = encoder->dev; - struct drm_panel *panel; - int ret; - - panel = of_drm_find_panel(device->dev.of_node); - if (!IS_ERR(panel)) { - dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel); - } else { - dsi->out_bridge = of_drm_find_bridge(device->dev.of_node); - if (!dsi->out_bridge) - dsi->out_bridge = ERR_PTR(-EINVAL); - } - - if (IS_ERR(dsi->out_bridge)) { - ret = PTR_ERR(dsi->out_bridge); - DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret); - return ret; - } - - DRM_DEV_INFO(dev, "Attached %s device\n", device->name); - drm_bridge_add(&dsi->bridge); - - drm_bridge_attach(encoder, &dsi->bridge, + drm_bridge_attach(encoder, &dsim->bridge, list_first_entry_or_null(&encoder->bridge_chain, struct drm_bridge, chain_node), 0); - /* - * This is a temporary solution and should be made by more generic way. - * - * If attached panel device is for command mode one, dsi should register - * TE interrupt handler. 
- */ - if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) { - ret = exynos_dsi_register_te_irq(dsi, &device->dev); - if (ret) - return ret; - } - mutex_lock(&drm->mode_config.mutex); - dsi->lanes = device->lanes; - dsi->format = device->format; - dsi->mode_flags = device->mode_flags; + dsim->lanes = device->lanes; + dsim->format = device->format; + dsim->mode_flags = device->mode_flags; exynos_drm_crtc_get_by_type(drm, EXYNOS_DISPLAY_TYPE_LCD)->i80_mode = - !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO); + !(dsim->mode_flags & MIPI_DSI_MODE_VIDEO); mutex_unlock(&drm->mode_config.mutex); @@ -1525,100 +60,20 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, return 0; } -static int exynos_dsi_host_detach(struct mipi_dsi_host *host, - struct mipi_dsi_device *device) +static void exynos_dsi_host_detach(struct samsung_dsim *dsim, + struct mipi_dsi_device *device) { - struct exynos_dsi *dsi = host_to_dsi(host); + struct exynos_dsi *dsi = dsim->priv; struct drm_device *drm = dsi->encoder.dev; - if (dsi->out_bridge->funcs->detach) - dsi->out_bridge->funcs->detach(dsi->out_bridge); - dsi->out_bridge = NULL; - if (drm->mode_config.poll_enabled) drm_kms_helper_hotplug_event(drm); - - exynos_dsi_unregister_te_irq(dsi); - - drm_bridge_remove(&dsi->bridge); - - return 0; } -static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host, - const struct mipi_dsi_msg *msg) +static int exynos_dsi_bind(struct device *dev, struct device *master, void *data) { - struct exynos_dsi *dsi = host_to_dsi(host); - struct exynos_dsi_transfer xfer; - int ret; - - if (!(dsi->state & DSIM_STATE_ENABLED)) - return -EINVAL; - - if (!(dsi->state & DSIM_STATE_INITIALIZED)) { - ret = exynos_dsi_init(dsi); - if (ret) - return ret; - dsi->state |= DSIM_STATE_INITIALIZED; - } - - ret = mipi_dsi_create_packet(&xfer.packet, msg); - if (ret < 0) - return ret; - - xfer.rx_len = msg->rx_len; - xfer.rx_payload = msg->rx_buf; - xfer.flags = msg->flags; - - ret = exynos_dsi_transfer(dsi, &xfer); - return (ret < 0) ? 
ret : xfer.rx_done; -} - -static const struct mipi_dsi_host_ops exynos_dsi_ops = { - .attach = exynos_dsi_host_attach, - .detach = exynos_dsi_host_detach, - .transfer = exynos_dsi_host_transfer, -}; - -static int exynos_dsi_of_read_u32(const struct device_node *np, - const char *propname, u32 *out_value) -{ - int ret = of_property_read_u32(np, propname, out_value); - - if (ret < 0) - pr_err("%pOF: failed to get '%s' property\n", np, propname); - - return ret; -} - -static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) -{ - struct device *dev = dsi->dev; - struct device_node *node = dev->of_node; - int ret; - - ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", - &dsi->pll_clk_rate); - if (ret < 0) - return ret; - - ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", - &dsi->burst_clk_rate); - if (ret < 0) - return ret; - - ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", - &dsi->esc_clk_rate); - if (ret < 0) - return ret; - - return 0; -} - -static int exynos_dsi_bind(struct device *dev, struct device *master, - void *data) -{ - struct exynos_dsi *dsi = dev_get_drvdata(dev); + struct samsung_dsim *dsim = dev_get_drvdata(dev); + struct exynos_dsi *dsi = dsim->priv; struct drm_encoder *encoder = &dsi->encoder; struct drm_device *drm_dev = data; int ret; @@ -1629,17 +84,16 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, if (ret < 0) return ret; - return mipi_dsi_host_register(&dsi->dsi_host); + return mipi_dsi_host_register(&dsim->dsi_host); } -static void exynos_dsi_unbind(struct device *dev, struct device *master, - void *data) +static void exynos_dsi_unbind(struct device *dev, struct device *master, void *data) { - struct exynos_dsi *dsi = dev_get_drvdata(dev); + struct samsung_dsim *dsim = dev_get_drvdata(dev); - exynos_dsi_atomic_disable(&dsi->bridge, NULL); + dsim->bridge.funcs->atomic_disable(&dsim->bridge, NULL); - mipi_dsi_host_unregister(&dsi->dsi_host); + mipi_dsi_host_unregister(&dsim->dsi_host); } static const struct component_ops exynos_dsi_component_ops = { @@ -1647,189 +101,90 @@ static const struct component_ops exynos_dsi_component_ops = { .unbind = exynos_dsi_unbind, }; -static int exynos_dsi_probe(struct platform_device *pdev) +static int exynos_dsi_register_host(struct samsung_dsim *dsim) { - struct device *dev = &pdev->dev; struct exynos_dsi *dsi; - int ret, i; - dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); + dsi = devm_kzalloc(dsim->dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; - init_completion(&dsi->completed); - spin_lock_init(&dsi->transfer_lock); - INIT_LIST_HEAD(&dsi->transfer_list); - - dsi->dsi_host.ops = &exynos_dsi_ops; - dsi->dsi_host.dev = dev; + dsim->priv = dsi; + dsim->bridge.pre_enable_prev_first = true; - dsi->dev = dev; - dsi->driver_data = of_device_get_match_data(dev); - - dsi->supplies[0].supply = "vddcore"; - dsi->supplies[1].supply = "vddio"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), - dsi->supplies); - if (ret) - return dev_err_probe(dev, ret, "failed to get regulators\n"); - - dsi->clks = devm_kcalloc(dev, - dsi->driver_data->num_clks, sizeof(*dsi->clks), - GFP_KERNEL); - if (!dsi->clks) - return -ENOMEM; - - for (i = 0; i < dsi->driver_data->num_clks; i++) { - dsi->clks[i] = devm_clk_get(dev, clk_names[i]); - if (IS_ERR(dsi->clks[i])) { - if (strcmp(clk_names[i], "sclk_mipi") == 0) { - dsi->clks[i] = devm_clk_get(dev, - OLD_SCLK_MIPI_CLK_NAME); - if (!IS_ERR(dsi->clks[i])) - continue; - } - - dev_info(dev, "failed to get the clock: 
%s\n", - clk_names[i]); - return PTR_ERR(dsi->clks[i]); - } - } - - dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(dsi->reg_base)) - return PTR_ERR(dsi->reg_base); - - dsi->phy = devm_phy_get(dev, "dsim"); - if (IS_ERR(dsi->phy)) { - dev_info(dev, "failed to get dsim phy\n"); - return PTR_ERR(dsi->phy); - } - - dsi->irq = platform_get_irq(pdev, 0); - if (dsi->irq < 0) - return dsi->irq; - - ret = devm_request_threaded_irq(dev, dsi->irq, NULL, - exynos_dsi_irq, - IRQF_ONESHOT | IRQF_NO_AUTOEN, - dev_name(dev), dsi); - if (ret) { - dev_err(dev, "failed to request dsi irq\n"); - return ret; - } - - ret = exynos_dsi_parse_dt(dsi); - if (ret) - return ret; - - platform_set_drvdata(pdev, dsi); - - pm_runtime_enable(dev); - - dsi->bridge.funcs = &exynos_dsi_bridge_funcs; - dsi->bridge.of_node = dev->of_node; - dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; - dsi->bridge.pre_enable_prev_first = true; - - ret = component_add(dev, &exynos_dsi_component_ops); - if (ret) - goto err_disable_runtime; - - return 0; - -err_disable_runtime: - pm_runtime_disable(dev); - - return ret; + return component_add(dsim->dev, &exynos_dsi_component_ops); } -static int exynos_dsi_remove(struct platform_device *pdev) +static void exynos_dsi_unregister_host(struct samsung_dsim *dsim) { - pm_runtime_disable(&pdev->dev); - - component_del(&pdev->dev, &exynos_dsi_component_ops); - - return 0; -} - -static int __maybe_unused exynos_dsi_suspend(struct device *dev) -{ - struct exynos_dsi *dsi = dev_get_drvdata(dev); - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - int ret, i; - - usleep_range(10000, 20000); - - if (dsi->state & DSIM_STATE_INITIALIZED) { - dsi->state &= ~DSIM_STATE_INITIALIZED; - - exynos_dsi_disable_clock(dsi); - - exynos_dsi_disable_irq(dsi); - } - - dsi->state &= ~DSIM_STATE_CMD_LPM; - - phy_power_off(dsi->phy); - - for (i = driver_data->num_clks - 1; i > -1; i--) - clk_disable_unprepare(dsi->clks[i]); - - ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret < 0) - dev_err(dsi->dev, "cannot disable regulators %d\n", ret); - - return 0; + component_del(dsim->dev, &exynos_dsi_component_ops); } -static int __maybe_unused exynos_dsi_resume(struct device *dev) -{ - struct exynos_dsi *dsi = dev_get_drvdata(dev); - const struct exynos_dsi_driver_data *driver_data = dsi->driver_data; - int ret, i; - - ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); - if (ret < 0) { - dev_err(dsi->dev, "cannot enable regulators %d\n", ret); - return ret; - } +static const struct samsung_dsim_host_ops exynos_dsi_exynos_host_ops = { + .register_host = exynos_dsi_register_host, + .unregister_host = exynos_dsi_unregister_host, + .attach = exynos_dsi_host_attach, + .detach = exynos_dsi_host_detach, + .te_irq_handler = exynos_dsi_te_irq_handler, +}; - for (i = 0; i < driver_data->num_clks; i++) { - ret = clk_prepare_enable(dsi->clks[i]); - if (ret < 0) - goto err_clk; - } +static const struct samsung_dsim_plat_data exynos3250_dsi_pdata = { + .hw_type = DSIM_TYPE_EXYNOS3250, + .host_ops = &exynos_dsi_exynos_host_ops, +}; - ret = phy_power_on(dsi->phy); - if (ret < 0) { - dev_err(dsi->dev, "cannot enable phy %d\n", ret); - goto err_clk; - } +static const struct samsung_dsim_plat_data exynos4210_dsi_pdata = { + .hw_type = DSIM_TYPE_EXYNOS4210, + .host_ops = &exynos_dsi_exynos_host_ops, +}; - return 0; +static const struct samsung_dsim_plat_data exynos5410_dsi_pdata = { + .hw_type = DSIM_TYPE_EXYNOS5410, + .host_ops = &exynos_dsi_exynos_host_ops, 
+}; -err_clk: - while (--i > -1) - clk_disable_unprepare(dsi->clks[i]); - regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); +static const struct samsung_dsim_plat_data exynos5422_dsi_pdata = { + .hw_type = DSIM_TYPE_EXYNOS5422, + .host_ops = &exynos_dsi_exynos_host_ops, +}; - return ret; -} +static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = { + .hw_type = DSIM_TYPE_EXYNOS5433, + .host_ops = &exynos_dsi_exynos_host_ops, +}; -static const struct dev_pm_ops exynos_dsi_pm_ops = { - SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) +static const struct of_device_id exynos_dsi_of_match[] = { + { + .compatible = "samsung,exynos3250-mipi-dsi", + .data = &exynos3250_dsi_pdata, + }, + { + .compatible = "samsung,exynos4210-mipi-dsi", + .data = &exynos4210_dsi_pdata, + }, + { + .compatible = "samsung,exynos5410-mipi-dsi", + .data = &exynos5410_dsi_pdata, + }, + { + .compatible = "samsung,exynos5422-mipi-dsi", + .data = &exynos5422_dsi_pdata, + }, + { + .compatible = "samsung,exynos5433-mipi-dsi", + .data = &exynos5433_dsi_pdata, + }, + { /* sentinel. */ } }; +MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); struct platform_driver dsi_driver = { - .probe = exynos_dsi_probe, - .remove = exynos_dsi_remove, + .probe = samsung_dsim_probe, + .remove = samsung_dsim_remove, .driver = { .name = "exynos-dsi", .owner = THIS_MODULE, - .pm = &exynos_dsi_pm_ops, + .pm = &samsung_dsim_pm_ops, .of_match_table = exynos_dsi_of_match, }, }; diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile index 63012bf2485a..4f302cd5e1a6 100644 --- a/drivers/gpu/drm/gma500/Makefile +++ b/drivers/gpu/drm/gma500/Makefile @@ -38,5 +38,6 @@ gma500_gfx-y += \ psb_irq.o gma500_gfx-$(CONFIG_ACPI) += opregion.o +gma500_gfx-$(CONFIG_DRM_FBDEV_EMULATION) += fbdev.o obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c new file mode 100644 index 000000000000..62287407e717 --- /dev/null +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0-only +/************************************************************************** + * Copyright (c) 2007-2011, Intel Corporation. + * All Rights Reserved. 
+ * + **************************************************************************/ + +#include <linux/pfn_t.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_framebuffer.h> + +#include "gem.h" +#include "psb_drv.h" + +/* + * VM area struct + */ + +static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct fb_info *info = vma->vm_private_data; + unsigned long address = vmf->address - (vmf->pgoff << PAGE_SHIFT); + unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT; + vm_fault_t err = VM_FAULT_SIGBUS; + unsigned long page_num = vma_pages(vma); + unsigned long i; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + for (i = 0; i < page_num; ++i) { + err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV)); + if (unlikely(err & VM_FAULT_ERROR)) + break; + address += PAGE_SIZE; + ++pfn; + } + + return err; +} + +static const struct vm_operations_struct psb_fbdev_vm_ops = { + .fault = psb_fbdev_vm_fault, +}; + +/* + * struct fb_ops + */ + +#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) + +static int psb_fbdev_fb_setcolreg(unsigned int regno, + unsigned int red, unsigned int green, + unsigned int blue, unsigned int transp, + struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_framebuffer *fb = fb_helper->fb; + uint32_t v; + + if (!fb) + return -ENOMEM; + + if (regno > 255) + return 1; + + red = CMAP_TOHW(red, info->var.red.length); + blue = CMAP_TOHW(blue, info->var.blue.length); + green = CMAP_TOHW(green, info->var.green.length); + transp = CMAP_TOHW(transp, info->var.transp.length); + + v = (red << info->var.red.offset) | + (green << info->var.green.offset) | + (blue << info->var.blue.offset) | + (transp << info->var.transp.offset); + + if (regno < 16) { + switch (fb->format->cpp[0] * 8) { + case 16: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + case 24: + case 32: + ((uint32_t *) info->pseudo_palette)[regno] = v; + break; + } + } + + return 0; +} + +static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + if (vma->vm_pgoff != 0) + return -EINVAL; + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + /* + * If this is a GEM object then info->screen_base is the virtual + * kernel remapping of the object. 
FIXME: Review if this is + * suitable for our mmap work + */ + vma->vm_ops = &psb_fbdev_vm_ops; + vma->vm_private_data = info; + vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP); + + return 0; +} + +static void psb_fbdev_fb_destroy(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_framebuffer *fb = fb_helper->fb; + struct drm_gem_object *obj = fb->obj[0]; + + drm_fb_helper_fini(fb_helper); + + drm_framebuffer_unregister_private(fb); + fb->obj[0] = NULL; + drm_framebuffer_cleanup(fb); + kfree(fb); + + drm_gem_object_put(obj); + + drm_client_release(&fb_helper->client); + + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); +} + +static const struct fb_ops psb_fbdev_fb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_setcolreg = psb_fbdev_fb_setcolreg, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, + .fb_mmap = psb_fbdev_fb_mmap, + .fb_destroy = psb_fbdev_fb_destroy, +}; + +/* + * struct drm_fb_helper_funcs + */ + +static int psb_fbdev_fb_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + struct fb_info *info; + struct drm_framebuffer *fb; + struct drm_mode_fb_cmd2 mode_cmd = { }; + int size; + int ret; + struct psb_gem_object *backing; + struct drm_gem_object *obj; + u32 bpp, depth; + + /* No 24-bit packed mode */ + if (sizes->surface_bpp == 24) { + sizes->surface_bpp = 32; + sizes->surface_depth = 24; + } + bpp = sizes->surface_bpp; + depth = sizes->surface_depth; + + /* + * If the mode does not fit in 32 bit then switch to 16 bit to get + * a console on full resolution. The X mode setting server will + * allocate its own 32-bit GEM framebuffer. 
+ */ + size = ALIGN(sizes->surface_width * DIV_ROUND_UP(bpp, 8), 64) * + sizes->surface_height; + size = ALIGN(size, PAGE_SIZE); + + if (size > dev_priv->vram_stolen_size) { + sizes->surface_bpp = 16; + sizes->surface_depth = 16; + } + bpp = sizes->surface_bpp; + depth = sizes->surface_depth; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); + + size = mode_cmd.pitches[0] * mode_cmd.height; + size = ALIGN(size, PAGE_SIZE); + + /* Allocate the framebuffer in the GTT with stolen page backing */ + backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE); + if (IS_ERR(backing)) + return PTR_ERR(backing); + obj = &backing->base; + + fb = psb_framebuffer_create(dev, &mode_cmd, obj); + if (IS_ERR(fb)) { + ret = PTR_ERR(fb); + goto err_drm_gem_object_put; + } + + fb_helper->fb = fb; + + info = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto err_drm_framebuffer_unregister_private; + } + + info->fbops = &psb_fbdev_fb_ops; + info->flags = FBINFO_DEFAULT; + /* Accessed stolen memory directly */ + info->screen_base = dev_priv->vram_addr + backing->offset; + info->screen_size = size; + + drm_fb_helper_fill_info(info, fb_helper, sizes); + + info->fix.smem_start = dev_priv->stolen_base + backing->offset; + info->fix.smem_len = size; + info->fix.ywrapstep = 0; + info->fix.ypanstep = 0; + info->fix.mmio_start = pci_resource_start(pdev, 0); + info->fix.mmio_len = pci_resource_len(pdev, 0); + + memset(info->screen_base, 0, info->screen_size); + + /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ + + dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height); + + return 0; + +err_drm_framebuffer_unregister_private: + drm_framebuffer_unregister_private(fb); + fb->obj[0] = NULL; + drm_framebuffer_cleanup(fb); + kfree(fb); +err_drm_gem_object_put: + drm_gem_object_put(obj); + return ret; +} + +static const struct drm_fb_helper_funcs psb_fbdev_fb_helper_funcs = { + .fb_probe = psb_fbdev_fb_probe, +}; + +/* + * struct drm_client_funcs and setup code + */ + +static void psb_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->info) { + drm_fb_helper_unregister_info(fb_helper); + } else { + drm_fb_helper_unprepare(fb_helper); + drm_client_release(&fb_helper->client); + kfree(fb_helper); + } +} + +static int psb_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int psb_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err_drm_err; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper); + if (ret) + goto err_drm_fb_helper_fini; + + return 0; + +err_drm_fb_helper_fini: + drm_fb_helper_fini(fb_helper); +err_drm_err: + drm_err(dev, "Failed to setup gma500 fbdev emulation (ret=%d)\n", ret); + return ret; +} + +static const struct drm_client_funcs psb_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = psb_fbdev_client_unregister, + .restore = 
psb_fbdev_client_restore, + .hotplug = psb_fbdev_client_hotplug, +}; + +void psb_fbdev_setup(struct drm_psb_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->dev; + struct drm_fb_helper *fb_helper; + int ret; + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return; + drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fbdev_fb_helper_funcs); + + ret = drm_client_init(dev, &fb_helper->client, "fbdev-gma500", &psb_fbdev_client_funcs); + if (ret) { + drm_err(dev, "Failed to register client: %d\n", ret); + goto err_drm_fb_helper_unprepare; + } + + ret = psb_fbdev_client_hotplug(&fb_helper->client); + if (ret) + drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); + + drm_client_register(&fb_helper->client); + + return; + +err_drm_fb_helper_unprepare: + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); +} diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 50611eb7f134..1a374702b696 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -5,158 +5,18 @@ * **************************************************************************/ -#include <linux/console.h> -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/pfn_t.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/tty.h> - -#include <drm/drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> #include "framebuffer.h" -#include "gem.h" #include "psb_drv.h" -#include "psb_intel_drv.h" -#include "psb_intel_reg.h" static const struct drm_framebuffer_funcs psb_fb_funcs = { .destroy = drm_gem_fb_destroy, .create_handle = drm_gem_fb_create_handle, }; -#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16) - -static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned transp, - struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - uint32_t v; - - if (!fb) - return -ENOMEM; - - if (regno > 255) - return 1; - - red = CMAP_TOHW(red, info->var.red.length); - blue = CMAP_TOHW(blue, info->var.blue.length); - green = CMAP_TOHW(green, info->var.green.length); - transp = CMAP_TOHW(transp, info->var.transp.length); - - v = (red << info->var.red.offset) | - (green << info->var.green.offset) | - (blue << info->var.blue.offset) | - (transp << info->var.transp.offset); - - if (regno < 16) { - switch (fb->format->cpp[0] * 8) { - case 16: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - case 24: - case 32: - ((uint32_t *) info->pseudo_palette)[regno] = v; - break; - } - } - - return 0; -} - -static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_framebuffer *fb = vma->vm_private_data; - struct drm_device *dev = fb->dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]); - int page_num; - int i; - unsigned long address; - vm_fault_t ret = VM_FAULT_SIGBUS; - unsigned long pfn; - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset; - - page_num = vma_pages(vma); - address = vmf->address - (vmf->pgoff << PAGE_SHIFT); - - vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot); - - for (i = 0; i < page_num; i++) { - pfn = (phys_addr >> PAGE_SHIFT); - - ret = vmf_insert_mixed(vma, address, - __pfn_to_pfn_t(pfn, PFN_DEV)); - if (unlikely(ret & VM_FAULT_ERROR)) - break; - address += PAGE_SIZE; - phys_addr += PAGE_SIZE; - } - return ret; -} - -static void psbfb_vm_open(struct vm_area_struct *vma) -{ -} - -static void psbfb_vm_close(struct vm_area_struct *vma) -{ -} - -static const struct vm_operations_struct psbfb_vm_ops = { - .fault = psbfb_vm_fault, - .open = psbfb_vm_open, - .close = psbfb_vm_close -}; - -static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - - if (vma->vm_pgoff != 0) - return -EINVAL; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - - /* - * If this is a GEM object then info->screen_base is the virtual - * kernel remapping of the object. FIXME: Review if this is - * suitable for our mmap work - */ - vma->vm_ops = &psbfb_vm_ops; - vma->vm_private_data = (void *)fb; - vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP); - return 0; -} - -static const struct fb_ops psbfb_unaccel_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_setcolreg = psbfb_setcolreg, - .fb_read = drm_fb_helper_cfb_read, - .fb_write = drm_fb_helper_cfb_write, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, - .fb_mmap = psbfb_mmap, -}; - /** * psb_framebuffer_init - initialize a framebuffer * @dev: our DRM device @@ -207,11 +67,9 @@ static int psb_framebuffer_init(struct drm_device *dev, * * TODO: review object references */ - -static struct drm_framebuffer *psb_framebuffer_create - (struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj) +struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) { struct drm_framebuffer *fb; int ret; @@ -229,93 +87,6 @@ static struct drm_framebuffer *psb_framebuffer_create } /** - * psbfb_create - create a framebuffer - * @fb_helper: the framebuffer helper - * @sizes: specification of the layout - * - * Create a framebuffer to the specifications provided - */ -static int psbfb_create(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_device *dev = fb_helper->dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); - struct fb_info *info; - struct drm_framebuffer *fb; - struct drm_mode_fb_cmd2 mode_cmd; - int size; - int ret; - struct psb_gem_object *backing; - struct drm_gem_object *obj; - u32 bpp, depth; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - bpp = sizes->surface_bpp; - depth = sizes->surface_depth; - - /* No 24bit packed */ - if (bpp == 24) - bpp = 32; - - mode_cmd.pitches[0] = ALIGN(mode_cmd.width * DIV_ROUND_UP(bpp, 8), 64); - - size = mode_cmd.pitches[0] * mode_cmd.height; - size = ALIGN(size, PAGE_SIZE); - - /* Allocate the framebuffer in the GTT with stolen page backing */ - backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE); - if (IS_ERR(backing)) - return PTR_ERR(backing); - obj = &backing->base; - - memset(dev_priv->vram_addr + backing->offset, 0, size); - - info = drm_fb_helper_alloc_info(fb_helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto 
err_drm_gem_object_put; - } - - mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); - - fb = psb_framebuffer_create(dev, &mode_cmd, obj); - if (IS_ERR(fb)) { - ret = PTR_ERR(fb); - goto err_drm_gem_object_put; - } - - fb_helper->fb = fb; - - info->fbops = &psbfb_unaccel_ops; - - info->fix.smem_start = dev_priv->fb_base; - info->fix.smem_len = size; - info->fix.ywrapstep = 0; - info->fix.ypanstep = 0; - - /* Accessed stolen memory directly */ - info->screen_base = dev_priv->vram_addr + backing->offset; - info->screen_size = size; - - drm_fb_helper_fill_info(info, fb_helper, sizes); - - info->fix.mmio_start = pci_resource_start(pdev, 0); - info->fix.mmio_len = pci_resource_len(pdev, 0); - - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - - dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height); - - return 0; - -err_drm_gem_object_put: - drm_gem_object_put(obj); - return ret; -} - -/** * psb_user_framebuffer_create - create framebuffer * @dev: our DRM device * @filp: client file @@ -346,108 +117,8 @@ static struct drm_framebuffer *psb_user_framebuffer_create return fb; } -static int psbfb_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_device *dev = fb_helper->dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - unsigned int fb_size; - int bytespp; - - bytespp = sizes->surface_bpp / 8; - if (bytespp == 3) /* no 24bit packed */ - bytespp = 4; - - /* If the mode will not fit in 32bit then switch to 16bit to get - a console on full resolution. The X mode setting server will - allocate its own 32bit GEM framebuffer */ - fb_size = ALIGN(sizes->surface_width * bytespp, 64) * - sizes->surface_height; - fb_size = ALIGN(fb_size, PAGE_SIZE); - - if (fb_size > dev_priv->vram_stolen_size) { - sizes->surface_bpp = 16; - sizes->surface_depth = 16; - } - - return psbfb_create(fb_helper, sizes); -} - -static const struct drm_fb_helper_funcs psb_fb_helper_funcs = { - .fb_probe = psbfb_probe, -}; - -static int psb_fbdev_destroy(struct drm_device *dev, - struct drm_fb_helper *fb_helper) -{ - struct drm_framebuffer *fb = fb_helper->fb; - - drm_fb_helper_unregister_info(fb_helper); - - drm_fb_helper_fini(fb_helper); - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - - if (fb->obj[0]) - drm_gem_object_put(fb->obj[0]); - kfree(fb); - - return 0; -} - -int psb_fbdev_init(struct drm_device *dev) -{ - struct drm_fb_helper *fb_helper; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - int ret; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) { - dev_err(dev->dev, "no memory\n"); - return -ENOMEM; - } - - dev_priv->fb_helper = fb_helper; - - drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fb_helper_funcs); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto free; - - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper); - if (ret) - goto fini; - - return 0; - -fini: - drm_fb_helper_fini(fb_helper); -free: - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); - return ret; -} - -static void psb_fbdev_fini(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - - if (!dev_priv->fb_helper) - return; - - psb_fbdev_destroy(dev, dev_priv->fb_helper); - drm_fb_helper_unprepare(dev_priv->fb_helper); - kfree(dev_priv->fb_helper); - dev_priv->fb_helper = NULL; -} - static const struct 
drm_mode_config_funcs psb_mode_funcs = { .fb_create = psb_user_framebuffer_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, }; static void psb_setup_outputs(struct drm_device *dev) @@ -515,7 +186,6 @@ void psb_modeset_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; - struct pci_dev *pdev = to_pci_dev(dev->dev); int i; if (drmm_mode_config_init(dev)) @@ -526,10 +196,6 @@ void psb_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &psb_mode_funcs; - /* set memory base */ - /* Oaktrail and Poulsbo should use BAR 2*/ - pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base)); - /* num pipes is 2 for PSB but 1 for Mrst */ for (i = 0; i < dev_priv->num_pipe; i++) psb_intel_crtc_init(dev, i, mode_dev); @@ -550,6 +216,5 @@ void psb_modeset_cleanup(struct drm_device *dev) struct drm_psb_private *dev_priv = to_drm_psb_private(dev); if (dev_priv->modeset) { drm_kms_helper_poll_fini(dev); - psb_fbdev_fini(dev); } } diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index cd9c73f5a64a..2ce96b1b9c74 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -21,7 +21,6 @@ #include <drm/drm.h> #include <drm/drm_aperture.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_pciids.h> @@ -387,7 +386,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ psb_modeset_init(dev); - psb_fbdev_init(dev); drm_kms_helper_poll_init(dev); /* Only add backlight support if we have LVDS or MIPI output */ @@ -452,6 +450,8 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) return ret; + psb_fbdev_setup(dev_priv); + return 0; } @@ -477,7 +477,6 @@ static const struct file_operations psb_gem_fops = { static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, - .lastclose = drm_fb_helper_lastclose, .num_ioctls = ARRAY_SIZE(psb_ioctls), diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index a5df6d2f2cab..f7f709df99b4 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -193,8 +193,6 @@ #define KSEL_BYPASS_25 6 #define KSEL_BYPASS_83_100 7 -struct drm_fb_helper; - struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -522,9 +520,6 @@ struct drm_psb_private { uint32_t blc_adj1; uint32_t blc_adj2; - struct drm_fb_helper *fb_helper; - resource_size_t fb_base; - bool dsr_enable; u32 dsr_fb_update; bool dpi_panel_on[3]; @@ -610,7 +605,19 @@ extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv); /* modesetting */ extern void psb_modeset_init(struct drm_device *dev); extern void psb_modeset_cleanup(struct drm_device *dev); -extern int psb_fbdev_init(struct drm_device *dev); + +/* framebuffer */ +struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj); + +/* fbdev */ +#if defined(CONFIG_DRM_FBDEV_EMULATION) +void psb_fbdev_setup(struct drm_psb_private *dev_priv); +#else +static inline void psb_fbdev_setup(struct drm_psb_private *dev_priv) +{ } +#endif /* backlight.c */ int gma_backlight_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 
d421031462df..343c51250207 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -32,17 +32,6 @@ static inline u32 gma_pipestat(int pipe) BUG(); } -static inline u32 gma_pipe_event(int pipe) -{ - if (pipe == 0) - return _PSB_PIPEA_EVENT_FLAG; - if (pipe == 1) - return _MDFLD_PIPEB_EVENT_FLAG; - if (pipe == 2) - return _MDFLD_PIPEC_EVENT_FLAG; - BUG(); -} - static inline u32 gma_pipeconf(int pipe) { if (pipe == 0) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 8e46f57e4569..057ef22fa9c6 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -195,6 +195,7 @@ i915-y += \ i915-y += \ gt/uc/intel_gsc_fw.o \ gt/uc/intel_gsc_uc.o \ + gt/uc/intel_gsc_uc_heci_cmd_submit.o\ gt/uc/intel_guc.o \ gt/uc/intel_guc_ads.o \ gt/uc/intel_guc_capture.o \ @@ -255,6 +256,7 @@ i915-y += \ display/intel_frontbuffer.o \ display/intel_global_state.o \ display/intel_hdcp.o \ + display/intel_hdcp_gsc.o \ display/intel_hotplug.o \ display/intel_hti.o \ display/intel_lpe_audio.o \ @@ -267,6 +269,7 @@ i915-y += \ display/intel_psr.o \ display/intel_quirks.o \ display/intel_sprite.o \ + display/intel_sprite_uapi.o \ display/intel_tc.o \ display/intel_vblank.o \ display/intel_vga.o \ diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 83aa3800245f..2910f5d0f3e2 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -267,3 +267,40 @@ void hsw_ips_get_config(struct intel_crtc_state *crtc_state) crtc_state->ips_enabled = true; } } + +static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + intel_wakeref_t wakeref; + + if (!HAS_IPS(i915)) + return -ENODEV; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + seq_printf(m, "Enabled by kernel parameter: %s\n", + str_yes_no(i915->params.enable_ips)); + + if (DISPLAY_VER(i915) >= 8) { + seq_puts(m, "Currently: unknown\n"); + } else { + if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE) + seq_puts(m, "Currently: enabled\n"); + else + seq_puts(m, "Currently: disabled\n"); + } + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status); + +void hsw_ips_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root, + i915, &hsw_ips_debugfs_status_fops); +} diff --git a/drivers/gpu/drm/i915/display/hsw_ips.h b/drivers/gpu/drm/i915/display/hsw_ips.h index 4564dee497d7..7ed6061874f7 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.h +++ b/drivers/gpu/drm/i915/display/hsw_ips.h @@ -8,6 +8,7 @@ #include <linux/types.h> +struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -22,5 +23,6 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); int hsw_ips_compute_config(struct intel_atomic_state *state, struct intel_crtc *crtc); void hsw_ips_get_config(struct intel_crtc_state *crtc_state); +void hsw_ips_debugfs_register(struct drm_i915_private *i915); #endif /* __HSW_IPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 50dcaa895854..4ff10b00ffbd 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1500,7 +1500,7 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, gen11_dsi_get_timings(encoder, 
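The new hsw_ips_debugfs_register() above relies on DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>, which generates a <name>_fops (wrapping single_open() around <name>_show()) that can be handed straight to debugfs_create_file(). A minimal sketch of the same pattern, with hypothetical names:

static int foo_status_show(struct seq_file *m, void *unused)
{
	seq_puts(m, "example state\n");	/* dump whatever state is of interest */
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_status);	/* provides foo_status_fops */

/* usage: debugfs_create_file("foo_status", 0444, parent, NULL, &foo_status_fops); */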
pipe_config); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); - pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); /* Get the details on which TE should be enabled */ if (is_cmd_mode(intel_dsi)) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 719a60e278f3..40de9f0f171b 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -32,6 +32,7 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include "i915_config.h" @@ -42,7 +43,6 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" -#include "intel_sprite.h" #include "skl_scaler.h" #include "skl_watermark.h" @@ -940,6 +940,64 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, return 0; } +int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_rect *src = &plane_state->uapi.src; + u32 src_x, src_y, src_w, src_h, hsub, vsub; + bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); + + /* + * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS + * abuses hsub/vsub so we can't use them here. But as they + * are limited to 32bpp RGB formats we don't actually need + * to check anything. + */ + if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) + return 0; + + /* + * Hardware doesn't handle subpixel coordinates. + * Adjust to (macro)pixel boundary, but be careful not to + * increase the source viewport size, because that could + * push the downscaling factor out of bounds. 
+ */ + src_x = src->x1 >> 16; + src_w = drm_rect_width(src) >> 16; + src_y = src->y1 >> 16; + src_h = drm_rect_height(src) >> 16; + + drm_rect_init(src, src_x << 16, src_y << 16, + src_w << 16, src_h << 16); + + if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { + hsub = 2; + vsub = 2; + } else { + hsub = fb->format->hsub; + vsub = fb->format->vsub; + } + + if (rotated) + hsub = vsub = max(hsub, vsub); + + if (src_x % hsub || src_w % hsub) { + drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_x, src_w, hsub, str_yes_no(rotated)); + return -EINVAL; + } + + if (src_y % vsub || src_h % vsub) { + drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_y, src_h, vsub, str_yes_no(rotated)); + return -EINVAL; + } + + return 0; +} + /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @_plane: drm plane to prepare for diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 74b6d3b169a7..191dad0efc8e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -62,6 +62,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, struct intel_crtc_state *crtc_state, int min_scale, int max_scale, bool can_position); +int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); void intel_plane_helper_add(struct intel_plane *plane); diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 65151f5dcb15..3d5a9bbc6fde 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -983,11 +983,7 @@ void intel_audio_cdclk_change_pre(struct drm_i915_private *i915) static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) { - if (refclk == 24000) - aud_ts->m = 12; - else - aud_ts->m = 15; - + aud_ts->m = 60; aud_ts->n = cdclk * aud_ts->m / 24000; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index e54febd34ca9..75e69dffc5e9 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -141,8 +141,8 @@ struct bdb_block_entry { }; static const void * -find_section(struct drm_i915_private *i915, - enum bdb_block_id section_id) +bdb_find_section(struct drm_i915_private *i915, + enum bdb_block_id section_id) { struct bdb_block_entry *entry; @@ -201,7 +201,7 @@ static size_t lfp_data_min_size(struct drm_i915_private *i915) const struct bdb_lvds_lfp_data_ptrs *ptrs; size_t size; - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return 0; @@ -630,7 +630,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915, { const struct bdb_lvds_options *lvds_options; - lvds_options = find_section(i915, BDB_LVDS_OPTIONS); + lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) return -1; @@ -671,11 +671,11 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, dump_pnp_id(i915, edid_id, "EDID"); - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return -1; - data = find_section(i915, BDB_LVDS_LFP_DATA); + data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); if (!data) 
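A concrete illustration of the new intel_plane_check_src_coordinates() rules (the numbers are made up):

/*
 * RGB565 framebuffer rotated 90 degrees -> hsub = vsub = 2.
 *
 *   src = (101.5, 64.0, 300x200) in 16.16 fixed point:
 *     truncation drops the fraction -> src_x = 101, src_w = 300,
 *     but 101 % 2 != 0, so the check fails with -EINVAL.
 *
 *   src = (100.0, 64.0, 300x200):
 *     100, 300, 64 and 200 are all even -> the check passes.
 */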
return -1; @@ -791,7 +791,7 @@ parse_panel_options(struct drm_i915_private *i915, int panel_type = panel->vbt.panel_type; int drrs_mode; - lvds_options = find_section(i915, BDB_LVDS_OPTIONS); + lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); if (!lvds_options) return; @@ -881,11 +881,11 @@ parse_lfp_data(struct drm_i915_private *i915, const struct lvds_pnp_id *pnp_id; int panel_type = panel->vbt.panel_type; - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return; - data = find_section(i915, BDB_LVDS_LFP_DATA); + data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); if (!data) return; @@ -932,7 +932,7 @@ parse_generic_dtd(struct drm_i915_private *i915, if (i915->display.vbt.version < 229) return; - generic_dtd = find_section(i915, BDB_GENERIC_DTD); + generic_dtd = bdb_find_section(i915, BDB_GENERIC_DTD); if (!generic_dtd) return; @@ -1011,7 +1011,7 @@ parse_lfp_backlight(struct drm_i915_private *i915, int panel_type = panel->vbt.panel_type; u16 level; - backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT); + backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT); if (!backlight_data) return; @@ -1119,14 +1119,14 @@ parse_sdvo_panel_data(struct drm_i915_private *i915, if (index == -1) { const struct bdb_sdvo_lvds_options *sdvo_lvds_options; - sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS); + sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS); if (!sdvo_lvds_options) return; index = sdvo_lvds_options->panel_type; } - dtds = find_section(i915, BDB_SDVO_PANEL_DTDS); + dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS); if (!dtds) return; @@ -1162,7 +1162,7 @@ parse_general_features(struct drm_i915_private *i915) { const struct bdb_general_features *general; - general = find_section(i915, BDB_GENERAL_FEATURES); + general = bdb_find_section(i915, BDB_GENERAL_FEATURES); if (!general) return; @@ -1285,7 +1285,7 @@ parse_driver_features(struct drm_i915_private *i915) { const struct bdb_driver_features *driver; - driver = find_section(i915, BDB_DRIVER_FEATURES); + driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); if (!driver) return; @@ -1322,7 +1322,7 @@ parse_panel_driver_features(struct drm_i915_private *i915, { const struct bdb_driver_features *driver; - driver = find_section(i915, BDB_DRIVER_FEATURES); + driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); if (!driver) return; @@ -1362,7 +1362,7 @@ parse_power_conservation_features(struct drm_i915_private *i915, if (i915->display.vbt.version < 228) return; - power = find_section(i915, BDB_LFP_POWER); + power = bdb_find_section(i915, BDB_LFP_POWER); if (!power) return; @@ -1402,7 +1402,7 @@ parse_edp(struct drm_i915_private *i915, const struct edp_fast_link_params *edp_link_params; int panel_type = panel->vbt.panel_type; - edp = find_section(i915, BDB_EDP); + edp = bdb_find_section(i915, BDB_EDP); if (!edp) return; @@ -1532,7 +1532,7 @@ parse_psr(struct drm_i915_private *i915, const struct psr_table *psr_table; int panel_type = panel->vbt.panel_type; - psr = find_section(i915, BDB_PSR); + psr = bdb_find_section(i915, BDB_PSR); if (!psr) { drm_dbg_kms(&i915->drm, "No PSR BDB found.\n"); return; @@ -1693,7 +1693,7 @@ parse_mipi_config(struct drm_i915_private *i915, /* Parse #52 for panel index used from panel_type already * parsed */ - start = find_section(i915, BDB_MIPI_CONFIG); + start = bdb_find_section(i915, BDB_MIPI_CONFIG); if (!start) { drm_dbg_kms(&i915->drm, "No MIPI config BDB found"); return; @@ -2005,7 +2005,7 @@ 
parse_mipi_sequence(struct drm_i915_private *i915, if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) return; - sequence = find_section(i915, BDB_MIPI_SEQUENCE); + sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE); if (!sequence) { drm_dbg_kms(&i915->drm, "No MIPI Sequence found, parsing complete\n"); @@ -2086,7 +2086,7 @@ parse_compression_parameters(struct drm_i915_private *i915) if (i915->display.vbt.version < 198) return; - params = find_section(i915, BDB_COMPRESSION_PARAMETERS); + params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS); if (params) { /* Sanity checks */ if (params->entry_size != sizeof(params->data[0])) { @@ -2792,7 +2792,7 @@ parse_general_definitions(struct drm_i915_private *i915) u16 block_size; int bus_pin; - defs = find_section(i915, BDB_GENERAL_DEFINITIONS); + defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS); if (!defs) { drm_dbg_kms(&i915->drm, "No general definition block is found, no devices defined.\n"); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index a6dd08598233..36aac88143ac 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -47,6 +47,11 @@ struct intel_color_funcs { */ void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); /* + * Perform any extra tasks needed after all the + * double buffered registers have been latched. + */ + void (*color_post_update)(const struct intel_crtc_state *crtc_state); + /* * Load LUTs (and other single buffered color management * registers). Will (hopefully) be called during the vblank * following the latching of any double buffered registers @@ -614,9 +619,33 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) { + /* + * Despite Wa_1406463849, ICL no longer suffers from the SKL + * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()). + * Possibly due to the extra sticky CSC arming + * (see icl_color_post_update()). + * + * On TGL+ all CSC arming issues have been properly fixed. + */ icl_load_csc_matrix(crtc_state); } +static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) +{ + /* + * Possibly related to display WA #1184, SKL CSC loses the latched + * CSC coeff/offset register values if the CSC registers are disarmed + * between DC5 exit and PSR exit. This will cause the plane(s) to + * output all black (until CSC_MODE is rearmed and properly latched). + * Once PSR exit (and proper register latching) has occurred the + * danger is over. Thus when PSR is enabled the CSC coeff/offset + * register programming will be peformed from skl_color_commit_arm() + * which is called after PSR exit. 
+ */ + if (!crtc_state->has_psr) + ilk_load_csc_matrix(crtc_state); +} + static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) { ilk_load_csc_matrix(crtc_state); @@ -659,6 +688,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; u32 val = 0; + if (crtc_state->has_psr) + ilk_load_csc_matrix(crtc_state); + /* * We don't (yet) allow userspace to control the pipe background color, * so force it to black, but apply pipe gamma and CSC appropriately @@ -677,6 +709,47 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state) crtc_state->csc_mode); } +static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* + * We don't (yet) allow userspace to control the pipe background color, + * so force it to black. + */ + intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0); + + intel_de_write(i915, GAMMA_MODE(crtc->pipe), + crtc_state->gamma_mode); + + intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), + crtc_state->csc_mode); +} + +static void icl_color_post_update(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + /* + * Despite Wa_1406463849, ICL CSC is no longer disarmed by + * coeff/offset register *writes*. Instead, once CSC_MODE + * is armed it stays armed, even after it has been latched. + * Afterwards the coeff/offset registers become effectively + * self-arming. That self-arming must be disabled before the + * next icl_color_commit_noarm() tries to write the next set + * of coeff/offset registers. Fortunately register *reads* + * do still disarm the CSC. Naturally this must not be done + * until the previously written CSC registers have actually + * been latched. + * + * TGL+ no longer need this workaround. 
+ */ + intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe)); +} + static struct drm_property_blob * create_linear_lut(struct drm_i915_private *i915, int lut_size) { @@ -1376,6 +1449,14 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) i915->display.funcs.color->color_commit_arm(crtc_state); } +void intel_color_post_update(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + if (i915->display.funcs.color->color_post_update) + i915->display.funcs.color->color_post_update(crtc_state); +} + void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -3072,10 +3153,20 @@ static const struct intel_color_funcs i9xx_color_funcs = { .lut_equal = i9xx_lut_equal, }; +static const struct intel_color_funcs tgl_color_funcs = { + .color_check = icl_color_check, + .color_commit_noarm = icl_color_commit_noarm, + .color_commit_arm = icl_color_commit_arm, + .load_luts = icl_load_luts, + .read_luts = icl_read_luts, + .lut_equal = icl_lut_equal, +}; + static const struct intel_color_funcs icl_color_funcs = { .color_check = icl_color_check, .color_commit_noarm = icl_color_commit_noarm, - .color_commit_arm = skl_color_commit_arm, + .color_commit_arm = icl_color_commit_arm, + .color_post_update = icl_color_post_update, .load_luts = icl_load_luts, .read_luts = icl_read_luts, .lut_equal = icl_lut_equal, @@ -3083,7 +3174,7 @@ static const struct intel_color_funcs icl_color_funcs = { static const struct intel_color_funcs glk_color_funcs = { .color_check = glk_color_check, - .color_commit_noarm = ilk_color_commit_noarm, + .color_commit_noarm = skl_color_commit_noarm, .color_commit_arm = skl_color_commit_arm, .load_luts = glk_load_luts, .read_luts = glk_read_luts, @@ -3092,7 +3183,7 @@ static const struct intel_color_funcs glk_color_funcs = { static const struct intel_color_funcs skl_color_funcs = { .color_check = ivb_color_check, - .color_commit_noarm = ilk_color_commit_noarm, + .color_commit_noarm = skl_color_commit_noarm, .color_commit_arm = skl_color_commit_arm, .load_luts = bdw_load_luts, .read_luts = bdw_read_luts, @@ -3188,7 +3279,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915) else i915->display.funcs.color = &i9xx_color_funcs; } else { - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(i915) >= 12) + i915->display.funcs.color = &tgl_color_funcs; + else if (DISPLAY_VER(i915) == 11) i915->display.funcs.color = &icl_color_funcs; else if (DISPLAY_VER(i915) == 10) i915->display.funcs.color = &glk_color_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index d620b5b1e2a6..8002492be709 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -21,6 +21,7 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state); void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state); void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state); void intel_color_commit_arm(const struct intel_crtc_state *crtc_state); +void intel_color_post_update(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 
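Taken together, the color hooks above imply roughly the following per-commit ordering (an informal summary, not driver code):

/*
 *   color_commit_noarm()  - SKL/GLK: skip the CSC write while PSR is enabled
 *   color_commit_arm()    - SKL/GLK: write the CSC here instead (after PSR exit);
 *                           arm GAMMA_MODE / CSC_MODE
 *   <double-buffered registers latch at the next vblank>
 *   color_post_update()   - ICL only: dummy read of a CSC register to disarm
 *                           the otherwise self-arming CSC before the next commit
 */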
b79a8834559f..ed45a6934854 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -212,7 +212,7 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc) static int intel_crtc_late_register(struct drm_crtc *crtc) { - intel_crtc_debugfs_add(crtc); + intel_crtc_debugfs_add(to_intel_crtc(crtc)); return 0; } @@ -686,6 +686,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) */ intel_vrr_send_push(new_crtc_state); + /* + * Seamless M/N update may need to update frame timings. + * + * FIXME Should be synchronized with the start of vblank somehow... + */ + if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) + intel_crtc_update_active_timings(new_crtc_state); + local_irq_enable(); if (intel_vgpu_active(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index c3173c0c2068..31bef0427377 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -21,7 +21,6 @@ #include "intel_fb_pin.h" #include "intel_frontbuffer.h" #include "intel_psr.h" -#include "intel_sprite.h" #include "skl_watermark.h" /* Cursor formats */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0950bcfea4c0..73240cf78c8b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -65,7 +65,6 @@ #include "intel_psr.h" #include "intel_quirks.h" #include "intel_snps_phy.h" -#include "intel_sprite.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" @@ -2520,6 +2519,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (HAS_DP20(dev_priv)) + intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), + crtc_state); + if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else @@ -2618,8 +2621,7 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, if (intel_crtc_has_dp_encoder(crtc_state)) intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), - DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_LINK_TRAIN_PAT1); + DP_TP_CTL_ENABLE, 0); /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); @@ -3140,8 +3142,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, wait = true; } - dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1; + dp_tp_ctl &= ~DP_TP_CTL_ENABLE; intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); @@ -3543,6 +3544,37 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder, intel_ddi_get_config(encoder, crtc_state); } +static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll) +{ + return pll->info->id == DPLL_ID_ICL_TBTPLL; +} + +static enum icl_port_dpll_id +icl_ddi_tc_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + + if (drm_WARN_ON(&i915->drm, !pll)) + return ICL_PORT_DPLL_DEFAULT; + + if (icl_ddi_tc_pll_is_tbt(pll)) + return ICL_PORT_DPLL_DEFAULT; + else + return ICL_PORT_DPLL_MG_PHY; +} + +enum icl_port_dpll_id +intel_ddi_port_pll_type(struct intel_encoder 
*encoder, + const struct intel_crtc_state *crtc_state) +{ + if (!encoder->port_pll_type) + return ICL_PORT_DPLL_DEFAULT; + + return encoder->port_pll_type(encoder, crtc_state); +} + static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll) @@ -3555,7 +3587,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, if (drm_WARN_ON(&i915->drm, !pll)) return; - if (pll->info->id == DPLL_ID_ICL_TBTPLL) + if (icl_ddi_tc_pll_is_tbt(pll)) port_dpll_id = ICL_PORT_DPLL_DEFAULT; else port_dpll_id = ICL_PORT_DPLL_MG_PHY; @@ -3568,7 +3600,7 @@ static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, icl_set_active_port_dpll(crtc_state, port_dpll_id); - if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL) + if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll)) crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port); else crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, @@ -3610,7 +3642,8 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_phy_is_tc(i915, phy)) - intel_tc_port_sanitize_mode(enc_to_dig_port(encoder)); + intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), + crtc_state); if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) intel_dp_sync_state(encoder, crtc_state); @@ -4404,6 +4437,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->enable_clock = jsl_ddi_tc_enable_clock; encoder->disable_clock = jsl_ddi_tc_disable_clock; encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; + encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_combo_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; @@ -4416,6 +4450,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->enable_clock = icl_ddi_tc_enable_clock; encoder->disable_clock = icl_ddi_tc_disable_clock; encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; + encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_tc_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; @@ -4496,6 +4531,16 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) !intel_bios_encoder_supports_typec_usb(devdata) && !intel_bios_encoder_supports_tbt(devdata); + if (!is_legacy && init_hdmi) { + is_legacy = !init_dp; + + drm_dbg_kms(&dev_priv->drm, + "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n", + port_name(port), + str_yes_no(init_dp), + is_legacy ? 
"legacy" : "non-legacy"); + } + intel_tc_port_init(dig_port, is_legacy); encoder->update_prepare = intel_ddi_update_prepare; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 361f6874dde5..c85e74ae68e4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -40,6 +40,9 @@ void hsw_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void hsw_ddi_disable_clock(struct intel_encoder *encoder); bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder); +enum icl_port_dpll_id +intel_ddi_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void hsw_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index edbcb1273ca2..5a386c7c0bc9 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -111,7 +111,6 @@ #include "intel_quirks.h" #include "intel_sdvo.h" #include "intel_snps_phy.h" -#include "intel_sprite.h" #include "intel_tc.h" #include "intel_tv.h" #include "intel_vblank.h" @@ -131,7 +130,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); -static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); +static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); /* returns HPLL frequency in kHz */ @@ -1116,6 +1115,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (needs_cursorclk_wa(old_crtc_state) && !needs_cursorclk_wa(new_crtc_state)) icl_wa_cursorclkgating(dev_priv, pipe, false); + + if (intel_crtc_needs_color_update(new_crtc_state)) + intel_color_post_update(new_crtc_state); } static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, @@ -1793,7 +1795,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_set_pipe_src_size(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(new_crtc_state); + bdw_set_pipe_misc(new_crtc_state); if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && !transcoder_is_dsi(cpu_transcoder)) @@ -2139,6 +2141,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, intel_set_pipe_src_size(new_crtc_state); + intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); @@ -3074,20 +3078,20 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, } static enum intel_output_format -bdw_get_pipemisc_output_format(struct intel_crtc *crtc) +bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); - if (tmp & PIPEMISC_YUV420_ENABLE) { + if (tmp & PIPE_MISC_YUV420_ENABLE) { /* We support 4:2:0 in full blend mode only */ drm_WARN_ON(&dev_priv->drm, - (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); + 
(tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); return INTEL_OUTPUT_FORMAT_YCBCR420; - } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { + } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { return INTEL_OUTPUT_FORMAT_YCBCR444; } else { return INTEL_OUTPUT_FORMAT_RGB; @@ -3330,7 +3334,7 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } -static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) +static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -3338,18 +3342,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPEMISC_BPC_6; + val |= PIPE_MISC_BPC_6; break; case 24: - val |= PIPEMISC_BPC_8; + val |= PIPE_MISC_BPC_8; break; case 30: - val |= PIPEMISC_BPC_10; + val |= PIPE_MISC_BPC_10; break; case 36: /* Port output 12BPC defined for ADLP+ */ if (DISPLAY_VER(dev_priv) > 12) - val |= PIPEMISC_BPC_12_ADLP; + val |= PIPE_MISC_BPC_12_ADLP; break; default: MISSING_CASE(crtc_state->pipe_bpp); @@ -3357,38 +3361,38 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) } if (crtc_state->dither) - val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; + val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) - val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; + val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - val |= PIPEMISC_YUV420_ENABLE | - PIPEMISC_YUV420_MODE_FULL_BLEND; + val |= PIPE_MISC_YUV420_ENABLE | + PIPE_MISC_YUV420_MODE_FULL_BLEND; if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) - val |= PIPEMISC_HDR_MODE_PRECISION; + val |= PIPE_MISC_HDR_MODE_PRECISION; if (DISPLAY_VER(dev_priv) >= 12) - val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; + val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; - intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); + intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); } -int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) +int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); - switch (tmp & PIPEMISC_BPC_MASK) { - case PIPEMISC_BPC_6: + switch (tmp & PIPE_MISC_BPC_MASK) { + case PIPE_MISC_BPC_6: return 18; - case PIPEMISC_BPC_8: + case PIPE_MISC_BPC_8: return 24; - case PIPEMISC_BPC_10: + case PIPE_MISC_BPC_10: return 30; /* * PORT OUTPUT 12 BPC defined for ADLP+. @@ -3400,7 +3404,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) * on older platforms, need to find a workaround for 12 BPC * MIPI DSI HW readout. 
*/ - case PIPEMISC_BPC_12_ADLP: + case PIPE_MISC_BPC_12_ADLP: if (DISPLAY_VER(dev_priv) > 12) return 36; fallthrough; @@ -3981,7 +3985,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; } else { pipe_config->output_format = - bdw_get_pipemisc_output_format(crtc); + bdw_get_pipe_misc_output_format(crtc); } pipe_config->gamma_mode = intel_de_read(dev_priv, @@ -5079,6 +5083,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state, * only fields that are know to not cause problems are preserved. */ saved_state->uapi = crtc_state->uapi; + saved_state->inherited = crtc_state->inherited; saved_state->scaler_state = crtc_state->scaler_state; saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; @@ -5904,68 +5909,6 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state, return 0; } -void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct drm_display_mode adjusted_mode; - - drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); - - if (crtc_state->vrr.enable) { - adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; - adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; - adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); - crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); - } - - drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); - - crtc->mode_flags = crtc_state->mode_flags; - - /* - * The scanline counter increments at the leading edge of hsync. - * - * On most platforms it starts counting from vtotal-1 on the - * first active line. That means the scanline counter value is - * always one less than what we would expect. Ie. just after - * start of vblank, which also occurs at start of hsync (on the - * last active line), the scanline counter will read vblank_start-1. - * - * On gen2 the scanline counter starts counting from 1 instead - * of vtotal-1, so we have to subtract one (or rather add vtotal-1 - * to keep the value positive), instead of adding one. - * - * On HSW+ the behaviour of the scanline counter depends on the output - * type. For DP ports it behaves like most other platforms, but on HDMI - * there's an extra 1 line difference. So we need to add two instead of - * one to the value. - * - * On VLV/CHV DSI the scanline counter would appear to increment - * approx. 1/3 of a scanline before start of vblank. Unfortunately - * that means we can't tell whether we're in vblank or not while - * we're on that particular line. We must still set scanline_offset - * to 1 so that the vblank timestamps come out correct when we query - * the scanline counter from within the vblank interrupt handler. - * However if queried just before the start of vblank we'll get an - * answer that's slightly in the future. - */ - if (DISPLAY_VER(dev_priv) == 2) { - int vtotal; - - vtotal = adjusted_mode.crtc_vtotal; - if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - vtotal /= 2; - - crtc->scanline_offset = vtotal - 1; - } else if (HAS_DDI(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - crtc->scanline_offset = 2; - } else { - crtc->scanline_offset = 1; - } -} - /* * This implements the workaround described in the "notes" section of the mode * set sequence documentation. 
When going from no pipes or single pipe to @@ -6970,7 +6913,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, intel_color_commit_arm(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(new_crtc_state); + bdw_set_pipe_misc(new_crtc_state); if (intel_crtc_needs_fastset(new_crtc_state)) intel_pipe_fastset(old_crtc_state, new_crtc_state); @@ -7046,6 +6989,8 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_fbc_update(state, crtc); + drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); + if (!modeset && intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_noarm(new_crtc_state); @@ -7413,8 +7358,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) drm_atomic_helper_wait_for_dependencies(&state->base); drm_dp_mst_atomic_wait_for_dependencies(&state->base); - if (state->modeset) - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + /* + * During full modesets we write a lot of registers, wait + * for PLLs, etc. Doing that while DC states are enabled + * is not a good idea. + * + * During fastsets and other updates we also need to + * disable DC states due to the following scenario: + * 1. DC5 exit and PSR exit happen + * 2. Some or all _noarm() registers are written + * 3. Due to some long delay PSR is re-entered + * 4. DC5 entry -> DMC saves the already written new + * _noarm() registers and the old not yet written + * _arm() registers + * 5. DC5 exit -> DMC restores a mixture of old and + * new register values and arms the update + * 6. PSR exit -> hardware latches a mixture of old and + * new register values -> corrupted frame, or worse + * 7. New _arm() registers are finally written + * 8. Hardware finally latches a complete set of new + * register values, and subsequent frames will be OK again + */ + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); intel_atomic_prepare_plane_clear_colors(state); @@ -7563,8 +7528,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * the culprit. 
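A stripped-down sketch of the bracketing this establishes around the whole commit tail (error handling and the intervening commit steps elided):

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
	/* ... write _noarm() registers, then _arm() registers, wait for vblank ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);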
*/ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); - intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } + intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); /* @@ -8886,14 +8851,14 @@ void intel_display_driver_register(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - intel_display_debugfs_register(i915); - /* Must be done after probing outputs */ intel_opregion_register(i915); intel_acpi_video_register(i915); intel_audio_init(i915); + intel_display_debugfs_register(i915); + /* * Some ports require correctly set-up hpd registers for * detection to work properly (leading to ghost connected diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 50285fb4fcf5..596fd3ec1983 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -422,7 +422,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state); bool intel_pipe_config_compare(const struct intel_crtc_state *current_config, const struct intel_crtc_state *pipe_config, bool fastset); -void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state); void intel_plane_destroy(struct drm_plane *plane); void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); @@ -511,7 +510,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); -int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); +int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); bool intel_plane_uses_fence(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index fdab7bb93a7d..0b5509f268a7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -183,6 +183,17 @@ struct intel_hotplug { * blocked behind the non-DP one. */ struct workqueue_struct *dp_wq; + + /* + * Flag to track if long HPDs need not to be processed + * + * Some panels generate long HPDs while keep connected to the port. + * This can cause issues with CI tests results. In CI systems we + * don't expect to disconnect the panels and could ignore the long + * HPDs generated from the faulty panels. This flag can be used as + * cue to ignore the long HPDs and can be set / unset using debugfs. + */ + bool ignore_long_hpd; }; struct intel_vbt_data { @@ -384,9 +395,15 @@ struct intel_display { } gmbus; struct { - struct i915_hdcp_comp_master *master; + struct i915_hdcp_master *master; bool comp_added; + /* + * HDCP message struct for allocation of memory which can be + * reused when sending message to gsc cs. + * this is only populated post Meteorlake + */ + struct intel_hdcp_gsc_message *hdcp_message; /* Mutex to protect the above hdcp component related values. 
*/ struct mutex comp_mutex; } hdcp; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 1e654ddd0815..cc5026272558 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -8,6 +8,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_fourcc.h> +#include "hsw_ips.h" #include "i915_debugfs.h" #include "i915_irq.h" #include "i915_reg.h" @@ -48,33 +49,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) return 0; } -static int i915_ips_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - - if (!HAS_IPS(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - seq_printf(m, "Enabled by kernel parameter: %s\n", - str_yes_no(dev_priv->params.enable_ips)); - - if (DISPLAY_VER(dev_priv) >= 8) { - seq_puts(m, "Currently: unknown\n"); - } else { - if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE) - seq_puts(m, "Currently: enabled\n"); - else - seq_puts(m, "Currently: disabled\n"); - } - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - static int i915_sr_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -168,269 +142,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return 0; } -static int i915_psr_sink_status_show(struct seq_file *m, void *data) -{ - u8 val; - static const char * const sink_status[] = { - "inactive", - "transition to active, capture and display", - "active, display from RFB", - "active, capture and display on sink device timings", - "transition to inactive, capture and display, timing re-sync", - "reserved", - "reserved", - "sink internal error", - }; - struct drm_connector *connector = m->private; - struct intel_dp *intel_dp = - intel_attached_dp(to_intel_connector(connector)); - int ret; - - if (!CAN_PSR(intel_dp)) { - seq_puts(m, "PSR Unsupported\n"); - return -ENODEV; - } - - if (connector->status != connector_status_connected) - return -ENODEV; - - ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); - - if (ret == 1) { - const char *str = "unknown"; - - val &= DP_PSR_SINK_STATE_MASK; - if (val < ARRAY_SIZE(sink_status)) - str = sink_status[val]; - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); - } else { - return ret; - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); - -static void -psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - const char *status = "unknown"; - u32 val, status_val; - - if (intel_dp->psr.psr2_enabled) { - static const char * const live_status[] = { - "IDLE", - "CAPTURE", - "CAPTURE_FS", - "SLEEP", - "BUFON_FW", - "ML_UP", - "SU_STANDBY", - "FAST_SLEEP", - "DEEP_SLEEP", - "BUF_ON", - "TG_ON" - }; - val = intel_de_read(dev_priv, - EDP_PSR2_STATUS(intel_dp->psr.transcoder)); - status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); - if (status_val < ARRAY_SIZE(live_status)) - status = live_status[status_val]; - } else { - static const char * const live_status[] = { - "IDLE", - "SRDONACK", - "SRDENT", - "BUFOFF", - "BUFON", - "AUXACK", - "SRDOFFACK", - "SRDENT_ON", - }; - val = intel_de_read(dev_priv, - EDP_PSR_STATUS(intel_dp->psr.transcoder)); - status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> - EDP_PSR_STATUS_STATE_SHIFT; - if (status_val < 
ARRAY_SIZE(live_status)) - status = live_status[status_val]; - } - - seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); -} - -static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_psr *psr = &intel_dp->psr; - intel_wakeref_t wakeref; - const char *status; - bool enabled; - u32 val; - - seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); - if (psr->sink_support) - seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); - seq_puts(m, "\n"); - - if (!psr->sink_support) - return 0; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&psr->lock); - - if (psr->enabled) - status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; - else - status = "disabled"; - seq_printf(m, "PSR mode: %s\n", status); - - if (!psr->enabled) { - seq_printf(m, "PSR sink not reliable: %s\n", - str_yes_no(psr->sink_not_reliable)); - - goto unlock; - } - - if (psr->psr2_enabled) { - val = intel_de_read(dev_priv, - EDP_PSR2_CTL(intel_dp->psr.transcoder)); - enabled = val & EDP_PSR2_ENABLE; - } else { - val = intel_de_read(dev_priv, - EDP_PSR_CTL(intel_dp->psr.transcoder)); - enabled = val & EDP_PSR_ENABLE; - } - seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", - str_enabled_disabled(enabled), val); - psr_source_status(intel_dp, m); - seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", - psr->busy_frontbuffer_bits); - - /* - * SKL+ Perf counter is reset to 0 everytime DC state is entered - */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = intel_de_read(dev_priv, - EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); - val &= EDP_PSR_PERF_CNT_MASK; - seq_printf(m, "Performance counter: %u\n", val); - } - - if (psr->debug & I915_PSR_DEBUG_IRQ) { - seq_printf(m, "Last attempted entry at: %lld\n", - psr->last_entry_attempt); - seq_printf(m, "Last exit at: %lld\n", psr->last_exit); - } - - if (psr->psr2_enabled) { - u32 su_frames_val[3]; - int frame; - - /* - * Reading all 3 registers before hand to minimize crossing a - * frame boundary between register reads - */ - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { - val = intel_de_read(dev_priv, - PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); - su_frames_val[frame / 3] = val; - } - - seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); - - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { - u32 su_blocks; - - su_blocks = su_frames_val[frame / 3] & - PSR2_SU_STATUS_MASK(frame); - su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); - seq_printf(m, "%d\t%d\n", frame, su_blocks); - } - - seq_printf(m, "PSR2 selective fetch: %s\n", - str_enabled_disabled(psr->psr2_sel_fetch_enabled)); - } - -unlock: - mutex_unlock(&psr->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_edp_psr_status(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_dp *intel_dp = NULL; - struct intel_encoder *encoder; - - if (!HAS_PSR(dev_priv)) - return -ENODEV; - - /* Find the first EDP which supports PSR */ - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { - intel_dp = enc_to_intel_dp(encoder); - break; - } - - if (!intel_dp) - return -ENODEV; - - return intel_psr_status(m, intel_dp); -} - -static int -i915_edp_psr_debug_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - struct intel_encoder *encoder; - intel_wakeref_t wakeref; - int ret = -ENODEV; - - if (!HAS_PSR(dev_priv)) - return ret; - - 
for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - // TODO: split to each transcoder's PSR debug state - ret = intel_psr_debug_set(intel_dp, val); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - } - - return ret; -} - -static int -i915_edp_psr_debug_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - struct intel_encoder *encoder; - - if (!HAS_PSR(dev_priv)) - return -ENODEV; - - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - // TODO: split to each transcoder's PSR debug state - *val = READ_ONCE(intel_dp->psr.debug); - return 0; - } - - return -ENODEV; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, - i915_edp_psr_debug_get, i915_edp_psr_debug_set, - "%llu\n"); - static int i915_power_domain_info(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -831,10 +542,10 @@ static const struct file_operations crtc_updates_fops = { .write = crtc_updates_write }; -static void crtc_updates_add(struct drm_crtc *crtc) +static void crtc_updates_add(struct intel_crtc *crtc) { - debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, - to_intel_crtc(crtc), &crtc_updates_fops); + debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry, + crtc, &crtc_updates_fops); } #else @@ -844,7 +555,7 @@ static void crtc_updates_info(struct seq_file *m, { } -static void crtc_updates_add(struct drm_crtc *crtc) +static void crtc_updates_add(struct intel_crtc *crtc) { } #endif @@ -1342,12 +1053,10 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, - {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, {"i915_vbt", i915_vbt, 0}, {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, - {"i915_edp_psr_status", i915_edp_psr_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_display_info", i915_display_info, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, @@ -1364,7 +1073,6 @@ static const struct { {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, - {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, }; void intel_display_debugfs_register(struct drm_i915_private *i915) @@ -1384,9 +1092,11 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + hsw_ips_debugfs_register(i915); intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); + intel_psr_debugfs_register(i915); intel_wm_debugfs_register(i915); } @@ -1439,16 +1149,6 @@ out: } DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); -static int i915_psr_status_show(struct seq_file *m, void *data) -{ - struct drm_connector *connector = m->private; - struct intel_dp *intel_dp = - intel_attached_dp(to_intel_connector(connector)); - - return intel_psr_status(m, intel_dp); -} -DEFINE_SHOW_ATTRIBUTE(i915_psr_status); - static int i915_lpsp_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = 
m->private; @@ -1666,7 +1366,7 @@ static const struct file_operations i915_dsc_bpc_fops = { */ static int i915_current_bpc_show(struct seq_file *m, void *data) { - struct intel_crtc *crtc = to_intel_crtc(m->private); + struct intel_crtc *crtc = m->private; struct intel_crtc_state *crtc_state; int ret; @@ -1683,6 +1383,17 @@ static int i915_current_bpc_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_current_bpc); +/* Pipe may differ from crtc index if pipes are fused off */ +static int intel_crtc_pipe_show(struct seq_file *m, void *unused) +{ + struct intel_crtc *crtc = m->private; + + seq_printf(m, "%c\n", pipe_name(crtc->pipe)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe); + /** * intel_connector_debugfs_add - add i915 specific connector debugfs files * @connector: pointer to a registered drm_connector @@ -1701,19 +1412,11 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) return; intel_drrs_connector_debugfs_add(intel_connector); + intel_psr_connector_debugfs_add(intel_connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) debugfs_create_file("i915_panel_timings", S_IRUGO, root, connector, &i915_panel_fops); - debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, - connector, &i915_psr_sink_status_fops); - } - - if (HAS_PSR(dev_priv) && - connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - debugfs_create_file("i915_psr_status", 0444, root, - connector, &i915_psr_status_fops); - } if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || @@ -1748,15 +1451,19 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) * * Failure to add debugfs entries should generally be ignored. 
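The new i915_pipe debugfs file above exists because, as its comment says, the hardware pipe may differ from the crtc index once pipes are fused off. A small stand-alone illustration of that mapping, with a hypothetical fused-off layout and pipe_name() reduced to 'A' + pipe:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

#define pipe_name(p) ((char)('A' + (p)))

int main(void)
{
	/*
	 * Hypothetical part with pipe B fused off: crtcs are only created
	 * for the remaining pipes, so crtc index 1 ends up on pipe C.
	 */
	enum pipe enabled[] = { PIPE_A, PIPE_C, PIPE_D };
	int crtc;

	for (crtc = 0; crtc < 3; crtc++)
		printf("crtc %d -> pipe %c\n", crtc, pipe_name(enabled[crtc]));
	return 0;
}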
*/ -void intel_crtc_debugfs_add(struct drm_crtc *crtc) +void intel_crtc_debugfs_add(struct intel_crtc *crtc) { - if (!crtc->debugfs_entry) + struct dentry *root = crtc->base.debugfs_entry; + + if (!root) return; crtc_updates_add(crtc); - intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc)); - intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc)); + intel_drrs_crtc_debugfs_add(crtc); + intel_fbc_crtc_debugfs_add(crtc); - debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc, + debugfs_create_file("i915_current_bpc", 0444, root, crtc, &i915_current_bpc_fops); + debugfs_create_file("i915_pipe", 0444, root, crtc, + &intel_crtc_pipe_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h index d3a79c07c384..e1f479b7acd1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h @@ -6,18 +6,18 @@ #ifndef __INTEL_DISPLAY_DEBUGFS_H__ #define __INTEL_DISPLAY_DEBUGFS_H__ -struct drm_crtc; struct drm_i915_private; struct intel_connector; +struct intel_crtc; #ifdef CONFIG_DEBUG_FS void intel_display_debugfs_register(struct drm_i915_private *i915); void intel_connector_debugfs_add(struct intel_connector *connector); -void intel_crtc_debugfs_add(struct drm_crtc *crtc); +void intel_crtc_debugfs_add(struct intel_crtc *crtc); #else static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} static inline void intel_connector_debugfs_add(struct intel_connector *connector) {} -static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {} +static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {} #endif #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f085ae971150..f86060195987 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1625,6 +1625,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_power_well_enable(dev_priv, well); mutex_unlock(&power_domains->lock); + if (DISPLAY_VER(dev_priv) == 14) + intel_de_rmw(dev_priv, DC_STATE_EN, + HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); + /* 4. Enable CDCLK. */ intel_cdclk_init_hw(dev_priv); @@ -1678,6 +1682,10 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) /* 3. Disable CD clock */ intel_cdclk_uninit_hw(dev_priv); + if (DISPLAY_VER(dev_priv) == 14) + intel_de_rmw(dev_priv, DC_STATE_EN, 0, + HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH); + /* * 4. Disable Power Well 1 (PG1). * The AUX IO power wells are toggled on demand, so they are already diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index c32bfba06ca1..ab146b5b68bd 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -43,7 +43,7 @@ #include <drm/drm_rect.h> #include <drm/drm_vblank.h> #include <drm/drm_vblank_work.h> -#include <drm/i915_mei_hdcp_interface.h> +#include <drm/i915_hdcp_interface.h> #include <media/cec-notifier.h> #include "i915_vma.h" @@ -255,6 +255,11 @@ struct intel_encoder { * Returns whether the port clock is enabled or not. */ bool (*is_clock_enabled)(struct intel_encoder *encoder); + /* + * Returns the PLL type the port uses. 
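The icl_display_core_init()/uninit() hunks above use intel_de_rmw(), which takes a clear mask and a set mask, to release and later re-assert the PHY PG1 latch-hold bits on display version 14. A minimal sketch of that read-modify-write pattern on a plain variable; the bit positions below are placeholders, not the real DC_STATE_EN layout:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit positions only, not the real DC_STATE_EN layout */
#define HOLD_PHY_PG1_LATCH		(1u << 1)
#define HOLD_PHY_CLKREQ_PG1_LATCH	(1u << 0)

static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;
	return old;
}

int main(void)
{
	uint32_t dc_state_en = HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH;

	/* display core init on version 14: release the PHY latch holds */
	rmw(&dc_state_en, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0);
	/* display core uninit: hold them again */
	rmw(&dc_state_en, 0, HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH);

	printf("DC_STATE_EN-like value: 0x%08x\n", dc_state_en);
	return 0;
}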
+ */ + enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries); @@ -1783,6 +1788,7 @@ struct intel_digital_port { bool tc_legacy_port:1; char tc_port_name[8]; enum tc_port_mode tc_mode; + enum tc_port_mode tc_init_mode; enum phy_fia tc_phy_fia; u8 tc_phy_fia_idx; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 6b162f77340e..8a88de67ff0a 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -89,10 +89,13 @@ static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) __stringify(major) "_" \ __stringify(minor) ".bin" +#define XELPDP_DMC_MAX_FW_SIZE 0x7000 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 - #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define MTL_DMC_PATH DMC_PATH(mtl) +MODULE_FIRMWARE(MTL_DMC_PATH); + #define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08) MODULE_FIRMWARE(DG2_DMC_PATH); @@ -424,15 +427,12 @@ static void disable_all_event_handlers(struct drm_i915_private *i915) } } -static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) { enum pipe pipe; - if (DISPLAY_VER(i915) < 13) - return; - /* - * Wa_16015201720:adl-p,dg2, mtl + * Wa_16015201720:adl-p,dg2 * The WA requires clock gating to be disabled all the time * for pipe A and B. * For pipe C and D clock gating needs to be disabled only @@ -448,6 +448,25 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) PIPEDMC_GATING_DIS, 0); } +static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) +{ + /* + * Wa_16015201720 + * The WA requires clock gating to be disabled all the time + * for pipe A and B. 
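With the Wa_16015201720 handling split above, pipedmc_clock_gating_wa() becomes a small dispatcher: display version 14 and newer apply the MTL variant only when enabling, version 13 keeps the ADL-P/DG2 variant for both enable and disable. A compact sketch of that control flow, with printfs standing in for the actual clock-gating register writes:

#include <stdbool.h>
#include <stdio.h>

static void mtl_wa(void)
{
	printf("disable PIPEDMC clock gating for pipes A and B\n");
}

static void adlp_wa(bool enable)
{
	printf("%s adl-p/dg2 PIPEDMC clock gating WA\n",
	       enable ? "apply" : "undo");
}

static void clock_gating_wa(int display_ver, bool enable)
{
	if (display_ver >= 14 && enable)
		mtl_wa();
	else if (display_ver == 13)
		adlp_wa(enable);
	/* older platforms: nothing to do */
}

int main(void)
{
	clock_gating_wa(14, true);	/* MTL: applied, never undone */
	clock_gating_wa(13, false);	/* ADL-P/DG2: undone on the way out */
	return 0;
}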
+ */ + intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, + MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); +} + +static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +{ + if (DISPLAY_VER(i915) >= 14 && enable) + mtl_pipedmc_clock_gating_wa(i915); + else if (DISPLAY_VER(i915) == 13) + adlp_pipedmc_clock_gating_wa(i915, enable); +} + void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) { enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); @@ -979,7 +998,10 @@ void intel_dmc_init(struct drm_i915_private *i915) INIT_WORK(&dmc->work, dmc_load_work_fn); - if (IS_DG2(i915)) { + if (IS_METEORLAKE(i915)) { + dmc->fw_path = MTL_DMC_PATH; + dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE; + } else if (IS_DG2(i915)) { dmc->fw_path = DG2_DMC_PATH; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_P(i915)) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index aee93b0d810e..da1c00ee92fb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -687,6 +687,12 @@ u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 p /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ if (DISPLAY_VER(i915) >= 13) { bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); + + /* + * According to BSpec, 27 is the max DSC output bpp, + * 8 is the min DSC output bpp + */ + bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27); } else { /* Find the nearest match in the array of known BPPs from VESA */ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { @@ -716,9 +722,19 @@ u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, * (LinkSymbolClock)* 8 * (TimeSlots / 64) * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) * for MST -> TimeSlots has to be calculated, based on mode requirements + * + * Due to FEC overhead, the available bw is reduced to 97.2261%. + * To support the given mode: + * Bandwidth required should be <= Available link Bandwidth * FEC Overhead + * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead + * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock + * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / + * (ModeClock / FEC Overhead) + * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / + * (ModeClock / FEC Overhead * 8) */ - bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots, - intel_dp_mode_to_fec_clock(mode_clock) * 8); + bits_per_pixel = ((link_clock * lane_count) * timeslots) / + (intel_dp_mode_to_fec_clock(mode_clock) * 8); drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " "total bw %u pixel clock %u\n", @@ -771,6 +787,13 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, min_slice_count = DIV_ROUND_UP(mode_clock, DP_DSC_MAX_ENC_THROUGHPUT_1); + /* + * Due to some DSC engine BW limitations, we need to enable second + * slice and VDSC engine, whenever we approach close enough to max CDCLK + */ + if (mode_clock >= ((i915->display.cdclk.max_cdclk_freq * 85) / 100)) + min_slice_count = max_t(u8, min_slice_count, 2); + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { drm_dbg_kms(&i915->drm, @@ -1597,16 +1620,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, * is greater than the maximum Cdclock and if slice count is even * then we need to use 2 VDSC instances. 
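The reworked compressed-bpp limit above follows the arithmetic spelled out in the new comment: the budget is (lane count * link symbol clock * timeslots) / (FEC-adjusted mode clock * 8), and on display 13+ the result is clamped to the 8..27 DSC output bpp range. A self-contained worked example; the link and mode numbers are arbitrary, and the 972261 scaling inside the helper is lifted from the comment's 97.2261% figure rather than from the real intel_dp_mode_to_fec_clock():

#include <stdint.h>
#include <stdio.h>

/* assumed FEC scaling derived from the comment's 97.2261% figure */
static uint32_t mode_to_fec_clock(uint32_t mode_clock)
{
	return (uint32_t)((uint64_t)mode_clock * 1000000 / 972261);
}

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint32_t link_clock = 810000;	/* kHz, example HBR3-class link */
	uint32_t lane_count = 4;
	uint32_t timeslots = 64;	/* SST: all timeslots available */
	uint32_t mode_clock = 533250;	/* kHz, example pixel clock */
	uint32_t bpp;

	bpp = (link_clock * lane_count) * timeslots /
	      (mode_to_fec_clock(mode_clock) * 8);

	printf("bpp budget %u, clamped to DSC range: %u\n",
	       bpp, clamp_u32(bpp, 8, 27));
	return 0;
}

Note that the hunk also switches the driver from DIV_ROUND_UP to plain integer division, so the budget now rounds down, which is the conservative direction for a bandwidth limit.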
*/ - if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq || - pipe_config->bigjoiner_pipes) { - if (pipe_config->dsc.slice_count > 1) { - pipe_config->dsc.dsc_split = true; - } else { - drm_dbg_kms(&dev_priv->drm, - "Cannot split stream to use 2 VDSC instances\n"); - return -EINVAL; - } - } + if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1) + pipe_config->dsc.dsc_split = true; ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); if (ret < 0) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 96967e21c94c..eb07dc5d8709 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -205,8 +205,19 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - if (is_tc_port) + if (is_tc_port) { intel_tc_port_lock(dig_port); + /* + * Abort transfers on a disconnected port as required by + * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX + * timeouts that would otherwise happen. + * TODO: abort the transfer on non-TC ports as well. + */ + if (!intel_tc_port_connected_locked(&dig_port->base)) { + ret = -ENXIO; + goto out_unlock; + } + } aux_domain = intel_aux_power_domain(dig_port); @@ -367,7 +378,7 @@ out: intel_pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); - +out_unlock: if (is_tc_port) intel_tc_port_unlock(dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 3d3efcf02011..d638054c74ac 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1379,10 +1379,6 @@ intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp, } } - /* FIXME: Should DP_TRAINING_PATTERN_DISABLE be written first? */ - if (intel_dp->set_idle_link_train) - intel_dp->set_idle_link_train(intel_dp, crtc_state); - return true; } @@ -1433,7 +1429,11 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; bool passed; + /* * TODO: Reiniting LTTPRs here won't be needed once proper connector * HW state readout is added. @@ -1451,6 +1451,46 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp, else passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count); + /* + * Ignore the link failure in CI + * + * In fixed enviroments like CI, sometimes unexpected long HPDs are + * generated by the displays. If ignore_long_hpd flag is set, such long + * HPDs are ignored. And probably as a consequence of these ignored + * long HPDs, subsequent link trainings are failed resulting into CI + * execution failures. + * + * For test cases which rely on the link training or processing of HPDs + * ignore_long_hpd flag can unset from the testcase. 
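Alongside the DSC-split and link-training changes, these hunks make intel_dp_aux_xfer() fail fast with -ENXIO when the Type-C port turns out to be disconnected after the port lock is taken, rather than waiting out the AUX timeouts. The control flow, with the locking and connectivity check stubbed out, looks roughly like this:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void tc_port_lock(void)		{ /* stand-in for intel_tc_port_lock() */ }
static void tc_port_unlock(void)	{ /* stand-in for intel_tc_port_unlock() */ }

static bool tc_port_connected_locked(void)
{
	return false;	/* simulate a port that was unplugged */
}

static int aux_xfer(bool is_tc_port)
{
	int ret;

	if (is_tc_port) {
		tc_port_lock();
		if (!tc_port_connected_locked()) {
			ret = -ENXIO;	/* abort instead of timing out */
			goto out_unlock;
		}
	}

	ret = 0;	/* the AUX transfer itself would happen here */

out_unlock:
	if (is_tc_port)
		tc_port_unlock();
	return ret;
}

int main(void)
{
	printf("aux_xfer() = %d\n", aux_xfer(true));
	return 0;
}

In the real function the new out_unlock label sits after the power-domain release, so the early exit only drops the port lock and skips cleanup it never acquired.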
+ */ + if (!passed && i915->display.hotplug.ignore_long_hpd) { + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Ignore the link failure\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + return; + } + if (!passed) intel_dp_schedule_fallback_link_training(intel_dp, crtc_state); } + +void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* + * VIDEO_DIP_CTL register bit 31 should be set to '0' to not + * disable SDP CRC. This is applicable for Display version 13. + * Default value of bit 31 is '0' hence discarding the write + * TODO: Corrective actions on SDP corruption yet to be defined + */ + if (intel_dp_is_uhbr(crtc_state)) + /* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */ + drm_dp_dpcd_writeb(&intel_dp->aux, + DP_SDP_ERROR_DETECTION_CONFIGURATION, + DP_SDP_CRC16_128B132B_EN); + + drm_dbg_kms(&i915->drm, "DP2.0 SDP CRC16 for 128b/132b enabled\n"); +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 7fa1c0833096..2c8f2775891b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -39,4 +39,6 @@ static inline u8 intel_dp_training_pattern_symbol(u8 pattern) return pattern & ~DP_LINK_SCRAMBLING_DISABLE; } +void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_DP_LINK_TRAINING_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 3659350061a7..673bcdfb7ff6 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper, bool prealloc = false; void __iomem *vaddr; struct drm_i915_gem_object *obj; + struct i915_gem_ww_ctx ww; int ret; mutex_lock(&ifbdev->hpd_lock); @@ -283,13 +284,24 @@ static int intelfb_create(struct drm_fb_helper *helper, info->fix.smem_len = vma->size; } - vaddr = i915_vma_pin_iomap(vma); - if (IS_ERR(vaddr)) { - drm_err(&dev_priv->drm, - "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); - ret = PTR_ERR(vaddr); - goto out_unpin; + for_i915_gem_ww(&ww, ret, false) { + ret = i915_gem_object_lock(vma->obj, &ww); + + if (ret) + continue; + + vaddr = i915_vma_pin_iomap(vma); + if (IS_ERR(vaddr)) { + drm_err(&dev_priv->drm, + "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); + ret = PTR_ERR(vaddr); + continue; + } } + + if (ret) + goto out_unpin; + info->screen_base = vaddr; info->screen_size = vma->size; diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index f55b4893c00f..c08c26a321b3 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -845,9 +845,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), - DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, - DP_TP_CTL_LINK_TRAIN_PAT1); + intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0); intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); intel_wait_ddi_buf_idle(dev_priv, PORT_E); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c 
b/drivers/gpu/drm/i915/display/intel_hdcp.c index 2984d2810e42..650232c4892b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -23,6 +23,7 @@ #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_hdcp_gsc.h" #include "intel_hdcp_regs.h" #include "intel_pcode.h" @@ -203,13 +204,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector) struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; + struct intel_gt *gt = dev_priv->media_gt; + struct intel_gsc_uc *gsc = >->uc.gsc; bool capable = false; /* I915 support for HDCP2.2 */ if (!hdcp->hdcp2_supported) return false; - /* MEI interface is solid */ + /* If MTL+ make sure gsc is loaded and proxy is setup */ + if (intel_hdcp_gsc_cs_required(dev_priv)) + if (!intel_uc_fw_is_running(&gsc->fw)) + return false; + + /* MEI/GSC interface is solid depending on which is used */ mutex_lock(&dev_priv->display.hdcp.comp_mutex); if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); @@ -1142,18 +1150,18 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); + ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); if (ret) drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. 
%d\n", ret); @@ -1172,18 +1180,18 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, + ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) @@ -1200,18 +1208,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); + ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret); mutex_unlock(&dev_priv->display.hdcp.comp_mutex); @@ -1226,18 +1234,18 @@ hdcp2_store_pairing_info(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); + ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", ret); @@ -1253,18 +1261,18 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); + ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. 
%d\n", ret); @@ -1280,18 +1288,18 @@ hdcp2_verify_lprime(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); + ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", ret); @@ -1306,18 +1314,18 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); + ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n", ret); @@ -1335,20 +1343,21 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, - rep_topology, - rep_send_ack); + ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, + data, + rep_topology, + rep_send_ack); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify rep topology failed. %d\n", ret); @@ -1364,18 +1373,18 @@ hdcp2_verify_mprime(struct intel_connector *connector, struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); + ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. 
%d\n", ret); mutex_unlock(&dev_priv->display.hdcp.comp_mutex); @@ -1388,18 +1397,18 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); + ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n", ret); @@ -1408,22 +1417,22 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) return ret; } -static int hdcp2_close_mei_session(struct intel_connector *connector) +static int hdcp2_close_session(struct intel_connector *connector) { struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct i915_hdcp_comp_master *comp; + struct i915_hdcp_master *arbiter; int ret; mutex_lock(&dev_priv->display.hdcp.comp_mutex); - comp = dev_priv->display.hdcp.master; + arbiter = dev_priv->display.hdcp.master; - if (!comp || !comp->ops) { + if (!arbiter || !arbiter->ops) { mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } - ret = comp->ops->close_hdcp_session(comp->mei_dev, + ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, &dig_port->hdcp_port_data); mutex_unlock(&dev_priv->display.hdcp.comp_mutex); @@ -1432,7 +1441,7 @@ static int hdcp2_close_mei_session(struct intel_connector *connector) static int hdcp2_deauthenticate_port(struct intel_connector *connector) { - return hdcp2_close_mei_session(connector); + return hdcp2_close_session(connector); } /* Authentication flow starts from here */ @@ -2142,8 +2151,8 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); mutex_lock(&dev_priv->display.hdcp.comp_mutex); - dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data; - dev_priv->display.hdcp.master->mei_dev = mei_kdev; + dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data; + dev_priv->display.hdcp.master->hdcp_dev = mei_kdev; mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return 0; @@ -2160,30 +2169,30 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev, mutex_unlock(&dev_priv->display.hdcp.comp_mutex); } -static const struct component_ops i915_hdcp_component_ops = { +static const struct component_ops i915_hdcp_ops = { .bind = i915_hdcp_component_bind, .unbind = i915_hdcp_component_unbind, }; -static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) +static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) { switch (port) { case PORT_A: - return MEI_DDI_A; + return HDCP_DDI_A; case PORT_B ... PORT_F: - return (enum mei_fw_ddi)port; + return (enum hdcp_ddi)port; default: - return MEI_DDI_INVALID_PORT; + return HDCP_DDI_INVALID_PORT; } } -static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) +static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) { switch (cpu_transcoder) { case TRANSCODER_A ... 
TRANSCODER_D: - return (enum mei_fw_tc)(cpu_transcoder | 0x10); + return (enum hdcp_transcoder)(cpu_transcoder | 0x10); default: /* eDP, DSI TRANSCODERS are non HDCP capable */ - return MEI_INVALID_TRANSCODER; + return HDCP_INVALID_TRANSCODER; } } @@ -2197,20 +2206,20 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, enum port port = dig_port->base.port; if (DISPLAY_VER(dev_priv) < 12) - data->fw_ddi = intel_get_mei_fw_ddi_index(port); + data->hdcp_ddi = intel_get_hdcp_ddi_index(port); else /* - * As per ME FW API expectation, for GEN 12+, fw_ddi is filled + * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled * with zero(INVALID PORT index). */ - data->fw_ddi = MEI_DDI_INVALID_PORT; + data->hdcp_ddi = HDCP_DDI_INVALID_PORT; /* - * As associated transcoder is set and modified at modeset, here fw_tc + * As associated transcoder is set and modified at modeset, here hdcp_transcoder * is initialized to zero (invalid transcoder index). This will be * retained for <Gen12 forever. */ - data->fw_tc = MEI_INVALID_TRANSCODER; + data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; data->protocol = (u8)shim->protocol; @@ -2232,6 +2241,9 @@ static int initialize_hdcp_port_data(struct intel_connector *connector, static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) { + if (intel_hdcp_gsc_cs_required(dev_priv)) + return true; + if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) return false; @@ -2253,10 +2265,14 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) dev_priv->display.hdcp.comp_added = true; mutex_unlock(&dev_priv->display.hdcp.comp_mutex); - ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, - I915_COMPONENT_HDCP); + if (intel_hdcp_gsc_cs_required(dev_priv)) + ret = intel_hdcp_gsc_init(dev_priv); + else + ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops, + I915_COMPONENT_HDCP); + if (ret < 0) { - drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n", + drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n", ret); mutex_lock(&dev_priv->display.hdcp.comp_mutex); dev_priv->display.hdcp.comp_added = false; @@ -2347,7 +2363,8 @@ int intel_hdcp_enable(struct intel_connector *connector, } if (DISPLAY_VER(dev_priv) >= 12) - dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder); + dig_port->hdcp_port_data.hdcp_transcoder = + intel_get_hdcp_transcoder(hdcp->cpu_transcoder); /* * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup @@ -2482,7 +2499,10 @@ void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) dev_priv->display.hdcp.comp_added = false; mutex_unlock(&dev_priv->display.hdcp.comp_mutex); - component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); + if (intel_hdcp_gsc_cs_required(dev_priv)) + intel_hdcp_gsc_fini(dev_priv); + else + component_del(dev_priv->drm.dev, &i915_hdcp_ops); } void intel_hdcp_cleanup(struct intel_connector *connector) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c new file mode 100644 index 000000000000..7e52aea6aa17 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c @@ -0,0 +1,831 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2023, Intel Corporation. 
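The renamed helpers above, intel_get_hdcp_ddi_index() and intel_get_hdcp_transcoder(), only translate the driver's port and transcoder enums into the indices the HDCP firmware interface expects: transcoders gain a 0x10 offset and eDP/DSI transcoders map to an invalid sentinel. A stand-alone sketch of that translation; the enum values are made-up placeholders, not the real i915 or firmware numbering:

#include <stdio.h>

/* all enum values below are made-up placeholders */
enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };
enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D,
		  TRANSCODER_EDP };

enum hdcp_ddi { HDCP_DDI_INVALID_PORT = 0x0, HDCP_DDI_B = 0x1, HDCP_DDI_A = 0x7 };
#define HDCP_INVALID_TRANSCODER 0xFF

static int hdcp_ddi_index(enum port port)
{
	switch (port) {
	case PORT_A:
		return HDCP_DDI_A;
	case PORT_B ... PORT_F:	/* B..F map 1:1 onto the firmware index */
		return (int)port;
	default:
		return HDCP_DDI_INVALID_PORT;
	}
}

static int hdcp_transcoder_index(enum transcoder t)
{
	switch (t) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (int)t | 0x10;
	default:
		return HDCP_INVALID_TRANSCODER;	/* eDP/DSI: not HDCP capable */
	}
}

int main(void)
{
	printf("PORT_A -> 0x%x, TRANSCODER_B -> 0x%x, TRANSCODER_EDP -> 0x%x\n",
	       hdcp_ddi_index(PORT_A), hdcp_transcoder_index(TRANSCODER_B),
	       hdcp_transcoder_index(TRANSCODER_EDP));
	return 0;
}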
+ */ + +#include <drm/i915_hdcp_interface.h> + +#include "display/intel_hdcp_gsc.h" +#include "gem/i915_gem_region.h" +#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" +#include "i915_drv.h" +#include "i915_utils.h" + +bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) >= 14; +} + +static int +gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data) +{ + struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; + struct wired_cmd_initiate_hdcp2_session_out + session_init_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !ake_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + session_init_in.header.api_version = HDCP_API_VERSION; + session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; + session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; + session_init_in.header.buffer_len = + WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; + + session_init_in.port.integrated_port_type = data->port_type; + session_init_in.port.physical_port = (u8)data->hdcp_ddi; + session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + session_init_in.protocol = data->protocol; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in, + sizeof(session_init_in), + (u8 *)&session_init_out, + sizeof(session_init_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_INITIATE_HDCP2_SESSION, + session_init_out.header.status); + return -EIO; + } + + ake_data->msg_id = HDCP_2_2_AKE_INIT; + ake_data->tx_caps = session_init_out.tx_caps; + memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); + + return 0; +} + +static int +gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz) +{ + struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; + struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_rxcert_in.header.api_version = HDCP_API_VERSION; + verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; + verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_rxcert_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; + + verify_rxcert_in.port.integrated_port_type = data->port_type; + verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; + verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + verify_rxcert_in.cert_rx = rx_cert->cert_rx; + memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); + memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in, + sizeof(verify_rxcert_in), + (u8 *)&verify_rxcert_out, + sizeof(verify_rxcert_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte); + return 
byte; + } + + if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_VERIFY_RECEIVER_CERT, + verify_rxcert_out.header.status); + return -EIO; + } + + *km_stored = !!verify_rxcert_out.km_stored; + if (verify_rxcert_out.km_stored) { + ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_stored_km); + } else { + ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); + } + + memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, + sizeof(verify_rxcert_out.ekm_buff)); + + return 0; +} + +static int +gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; + struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_hprime) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + send_hprime_in.header.api_version = HDCP_API_VERSION; + send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; + send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; + send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; + + send_hprime_in.port.integrated_port_type = data->port_type; + send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; + send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, + HDCP_2_2_H_PRIME_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in, + sizeof(send_hprime_in), + (u8 *)&send_hprime_out, + sizeof(send_hprime_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); + return -EIO; + } + + return 0; +} + +static int +gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; + struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !pairing_info) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + pairing_info_in.header.api_version = HDCP_API_VERSION; + pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; + pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; + pairing_info_in.header.buffer_len = + WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; + + pairing_info_in.port.integrated_port_type = data->port_type; + pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; + pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, + HDCP_2_2_E_KH_KM_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in, + sizeof(pairing_info_in), + (u8 *)&pairing_info_out, + sizeof(pairing_info_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); + return byte; + } + + if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", + WIRED_AKE_SEND_PAIRING_INFO, + pairing_info_out.header.status); + return -EIO; + } + + return 0; +} + +static int +gsc_hdcp_initiate_locality_check(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data) +{ + struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; + struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !lc_init_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + lc_init_in.header.api_version = HDCP_API_VERSION; + lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; + lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; + lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; + + lc_init_in.port.integrated_port_type = data->port_type; + lc_init_in.port.physical_port = (u8)data->hdcp_ddi; + lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), + (u8 *)&lc_init_out, sizeof(lc_init_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. status: 0x%X\n", + WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); + return -EIO; + } + + lc_init_data->msg_id = HDCP_2_2_LC_INIT; + memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); + + return 0; +} + +static int +gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; + struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !rx_lprime) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_lprime_in.header.api_version = HDCP_API_VERSION; + verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; + verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_lprime_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; + + verify_lprime_in.port.integrated_port_type = data->port_type; + verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; + verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, + HDCP_2_2_L_PRIME_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in, + sizeof(verify_lprime_in), + (u8 *)&verify_lprime_out, + sizeof(verify_lprime_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_VALIDATE_LOCALITY, + verify_lprime_out.header.status); + return -EIO; + } + + return 0; +} + +static int gsc_hdcp_get_session_key(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data) +{ + struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; + struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data || !ske_data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + get_skey_in.header.api_version = HDCP_API_VERSION; + get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; + get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; + get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; + + get_skey_in.port.integrated_port_type = data->port_type; + get_skey_in.port.physical_port = (u8)data->hdcp_ddi; + get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), + (u8 *)&get_skey_out, sizeof(get_skey_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_GET_SESSION_KEY, get_skey_out.header.status); + return -EIO; + } + + ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; + memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, + HDCP_2_2_E_DKEY_KS_LEN); + memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); + + return 0; +} + +static int +gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack) +{ + struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; + struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !rep_topology || !rep_send_ack || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + verify_repeater_in.header.api_version = HDCP_API_VERSION; + verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; + verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; + verify_repeater_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; + + verify_repeater_in.port.integrated_port_type = data->port_type; + verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; + verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, + HDCP_2_2_RXINFO_LEN); + memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, + HDCP_2_2_SEQ_NUM_LEN); + memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, + HDCP_2_2_V_PRIME_HALF_LEN); + memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, + HDCP_2_2_RECEIVER_IDS_MAX_LEN); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in, + sizeof(verify_repeater_in), + (u8 *)&verify_repeater_out, + sizeof(verify_repeater_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_VERIFY_REPEATER, + verify_repeater_out.header.status); + return -EIO; + } + + memcpy(rep_send_ack->v, verify_repeater_out.v, + HDCP_2_2_V_PRIME_HALF_LEN); + rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; + + return 0; +} + +static int gsc_hdcp_verify_mprime(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; + struct wired_cmd_repeater_auth_stream_req_out + verify_mprime_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + size_t cmd_size; + + if (!dev || !stream_ready || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + cmd_size = struct_size(verify_mprime_in, streams, data->k); + if (cmd_size == SIZE_MAX) + return -EINVAL; + + verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); + if (!verify_mprime_in) + return -ENOMEM; + + verify_mprime_in->header.api_version = HDCP_API_VERSION; + verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; + verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; + verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); + + verify_mprime_in->port.integrated_port_type = data->port_type; + verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; + verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; + + memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); + drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); + + memcpy(verify_mprime_in->streams, data->streams, + array_size(data->k, sizeof(*data->streams))); + + verify_mprime_in->k = cpu_to_be16(data->k); + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size, + (u8 *)&verify_mprime_out, + sizeof(verify_mprime_out)); + kfree(verify_mprime_in); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_REPEATER_AUTH_STREAM_REQ, + verify_mprime_out.header.status); + return -EIO; + } + + return 0; +} + +static int gsc_hdcp_enable_authentication(struct device *dev, + struct hdcp_port_data *data) +{ + struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; + struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + enable_auth_in.header.api_version = HDCP_API_VERSION; + enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; + enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; + enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; + + enable_auth_in.port.integrated_port_type = data->port_type; + enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; + enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + enable_auth_in.stream_type = data->streams[0].stream_type; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in, + sizeof(enable_auth_in), + (u8 *)&enable_auth_out, + sizeof(enable_auth_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); + return byte; + } + + if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", + WIRED_ENABLE_AUTH, enable_auth_out.header.status); + return -EIO; + } + + return 0; +} + +static int +gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) +{ + struct wired_cmd_close_session_in session_close_in = { { 0 } }; + struct wired_cmd_close_session_out session_close_out = { { 0 } }; + struct drm_i915_private *i915; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + i915 = kdev_to_i915(dev); + if (!i915) { + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); + return -ENODEV; + } + + session_close_in.header.api_version = HDCP_API_VERSION; + session_close_in.header.command_id = WIRED_CLOSE_SESSION; + session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; + session_close_in.header.buffer_len = + WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; + + session_close_in.port.integrated_port_type = data->port_type; + session_close_in.port.physical_port = (u8)data->hdcp_ddi; + session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; + + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in, + sizeof(session_close_in), + (u8 *)&session_close_out, + sizeof(session_close_out)); + if (byte < 0) { + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); + return byte; + } + + if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { + drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", + session_close_out.header.status); + return -EIO; + } + + return 0; +} + +static const struct i915_hdcp_ops gsc_hdcp_ops = { + .initiate_hdcp2_session = gsc_hdcp_initiate_session, + .verify_receiver_cert_prepare_km = + gsc_hdcp_verify_receiver_cert_prepare_km, + .verify_hprime = gsc_hdcp_verify_hprime, + .store_pairing_info = gsc_hdcp_store_pairing_info, + .initiate_locality_check = gsc_hdcp_initiate_locality_check, + .verify_lprime = gsc_hdcp_verify_lprime, + .get_session_key = gsc_hdcp_get_session_key, + .repeater_check_flow_prepare_ack = + gsc_hdcp_repeater_check_flow_prepare_ack, + .verify_mprime = gsc_hdcp_verify_mprime, + .enable_hdcp_authentication = gsc_hdcp_enable_authentication, + .close_hdcp_session = gsc_hdcp_close_session, +}; + +/*This function helps allocate memory for the command that we will send to gsc cs */ +static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, + struct intel_hdcp_gsc_message *hdcp_message) +{ + struct intel_gt *gt = i915->media_gt; + struct drm_i915_gem_object *obj = NULL; + struct i915_vma *vma = NULL; + void *cmd; + int err; + + /* allocate object of one page for HDCP command memory and store it */ + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + + if (IS_ERR(obj)) { + drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n"); + return PTR_ERR(obj); + } + + cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); + if (IS_ERR(cmd)) { + drm_err(&i915->drm, "Failed to map gsc message page!\n"); + err = PTR_ERR(cmd); + goto out_unpin; + } + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_unmap; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto out_unmap; + + memset(cmd, 0, obj->base.size); + + hdcp_message->hdcp_cmd = cmd; + hdcp_message->vma = vma; + + return 0; + +out_unmap: + i915_gem_object_unpin_map(obj); +out_unpin: + i915_gem_object_put(obj); + return err; +} + +static int 
intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) +{ + struct intel_hdcp_gsc_message *hdcp_message; + int ret; + + hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL); + + if (!hdcp_message) + return -ENOMEM; + + /* + * NOTE: No need to lock the comp mutex here as it is already + * going to be taken before this function called + */ + i915->display.hdcp.hdcp_message = hdcp_message; + ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message); + + if (ret) + drm_err(&i915->drm, "Could not initialize hdcp_message\n"); + + return ret; +} + +static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) +{ + struct intel_hdcp_gsc_message *hdcp_message = + i915->display.hdcp.hdcp_message; + + i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP); + kfree(hdcp_message); +} + +int intel_hdcp_gsc_init(struct drm_i915_private *i915) +{ + struct i915_hdcp_master *data; + int ret; + + data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_lock(&i915->display.hdcp.comp_mutex); + i915->display.hdcp.master = data; + i915->display.hdcp.master->hdcp_dev = i915->drm.dev; + i915->display.hdcp.master->ops = &gsc_hdcp_ops; + ret = intel_hdcp_gsc_hdcp2_init(i915); + mutex_unlock(&i915->display.hdcp.comp_mutex); + + return ret; +} + +void intel_hdcp_gsc_fini(struct drm_i915_private *i915) +{ + intel_hdcp_gsc_free_message(i915); + kfree(i915->display.hdcp.master); +} + +static int intel_gsc_send_sync(struct drm_i915_private *i915, + struct intel_gsc_mtl_header *header, u64 addr, + size_t msg_out_len) +{ + struct intel_gt *gt = i915->media_gt; + int ret; + + header->flags = 0; + ret = intel_gsc_uc_heci_cmd_submit_packet(>->uc.gsc, addr, + header->message_size, + addr, + msg_out_len + sizeof(*header)); + if (ret) { + drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret); + return ret; + } + + /* + * Checking validity marker for memory sanity + */ + if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) { + drm_err(&i915->drm, "invalid validity marker\n"); + return -EINVAL; + } + + if (header->status != 0) { + drm_err(&i915->drm, "header status indicates error %d\n", + header->status); + return -EINVAL; + } + + if (header->flags & GSC_OUTFLAG_MSG_PENDING) + return -EAGAIN; + + return 0; +} + +/* + * This function can now be used for sending requests and will also handle + * receipt of reply messages hence no different function of message retrieval + * is required. 
We will initialize intel_hdcp_gsc_message structure then add + * gsc cs memory header as stated in specs after which the normal HDCP payload + * will follow + */ +ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, + size_t msg_in_len, u8 *msg_out, + size_t msg_out_len) +{ + struct intel_gt *gt = i915->media_gt; + struct intel_gsc_mtl_header *header; + const size_t max_msg_size = PAGE_SIZE - sizeof(*header); + struct intel_hdcp_gsc_message *hdcp_message; + u64 addr, host_session_id; + u32 reply_size, msg_size; + int ret, tries = 0; + + if (!intel_uc_uses_gsc_uc(>->uc)) + return -ENODEV; + + if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) + return -ENOSPC; + + hdcp_message = i915->display.hdcp.hdcp_message; + header = hdcp_message->hdcp_cmd; + addr = i915_ggtt_offset(hdcp_message->vma); + + msg_size = msg_in_len + sizeof(*header); + memset(header, 0, msg_size); + get_random_bytes(&host_session_id, sizeof(u64)); + intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP, + msg_size, host_session_id); + memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len); + + /* + * Keep sending request in case the pending bit is set no need to add + * message handle as we are using same address hence loc. of header is + * same and it will contain the message handle. we will send the message + * 20 times each message 50 ms apart + */ + do { + ret = intel_gsc_send_sync(i915, header, addr, msg_out_len); + + /* Only try again if gsc says so */ + if (ret != -EAGAIN) + break; + + msleep(50); + + } while (++tries < 20); + + if (ret) + goto err; + + /* we use the same mem for the reply, so header is in the same loc */ + reply_size = header->message_size - sizeof(*header); + if (reply_size > msg_out_len) { + drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n", + reply_size, (u32)msg_out_len); + reply_size = msg_out_len; + } else if (reply_size != msg_out_len) { + drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n", + reply_size, (u32)msg_out_len); + } + + memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len); + +err: + return ret; +} diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h new file mode 100644 index 000000000000..5cc9fd2e88f6 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_HDCP_GSC_H__ +#define __INTEL_HDCP_GSC_H__ + +#include <linux/err.h> +#include <linux/types.h> + +struct drm_i915_private; + +struct intel_hdcp_gsc_message { + struct i915_vma *vma; + void *hdcp_cmd; +}; + +bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); +ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, + size_t msg_in_len, u8 *msg_out, + size_t msg_out_len); +int intel_hdcp_gsc_init(struct drm_i915_private *i915); +void intel_hdcp_gsc_fini(struct drm_i915_private *i915); + +#endif /* __INTEL_HDCP_GCS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 907ab7526cb4..b12900446828 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -389,6 +389,13 @@ static void i915_hotplug_work_func(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); + /* Skip calling encode hotplug handlers if ignore long HPD set*/ + if (dev_priv->display.hotplug.ignore_long_hpd) { + 
drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); + mutex_unlock(&dev_priv->drm.mode_config.mutex); + return; + } + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -940,4 +947,6 @@ void intel_hpd_debugfs_register(struct drm_i915_private *i915) i915, &i915_hpd_storm_ctl_fops); debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, i915, &i915_hpd_short_storm_ctl_fops); + debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, + &i915->display.hotplug.ignore_long_hpd); } diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 1d0c9e247c42..4558d02641fe 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -26,6 +26,7 @@ #include "intel_fifo_underrun.h" #include "intel_modeset_setup.h" #include "intel_pch_display.h" +#include "intel_vblank.h" #include "intel_wm.h" #include "skl_watermark.h" diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index b8dce0576512..b7973a05d022 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -1159,13 +1159,10 @@ void intel_opregion_register(struct drm_i915_private *i915) intel_opregion_resume(i915); } -void intel_opregion_resume(struct drm_i915_private *i915) +static void intel_opregion_resume_display(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; - if (!opregion->header) - return; - if (opregion->acpi) { intel_didl_outputs(i915); intel_setup_cadls(i915); @@ -1186,18 +1183,24 @@ void intel_opregion_resume(struct drm_i915_private *i915) /* Some platforms abuse the _DSM to enable MUX */ intel_dsm_get_bios_data_funcs_supported(i915); - - intel_opregion_notify_adapter(i915, PCI_D0); } -void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) +void intel_opregion_resume(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; - intel_opregion_notify_adapter(i915, state); + if (HAS_DISPLAY(i915)) + intel_opregion_resume_display(i915); + + intel_opregion_notify_adapter(i915, PCI_D0); +} + +static void intel_opregion_suspend_display(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = &i915->display.opregion; if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; @@ -1208,6 +1211,19 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) opregion->acpi->drdy = 0; } +void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) +{ + struct intel_opregion *opregion = &i915->display.opregion; + + if (!opregion->header) + return; + + intel_opregion_notify_adapter(i915, state); + + if (HAS_DISPLAY(i915)) + intel_opregion_suspend_display(i915); +} + void intel_opregion_unregister(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->display.opregion; @@ -1221,6 +1237,14 @@ void intel_opregion_unregister(struct drm_i915_private *i915) unregister_acpi_notifier(&opregion->acpi_notifier); opregion->acpi_notifier.notifier_call = NULL; } +} + +void intel_opregion_cleanup(struct drm_i915_private *i915) +{ + struct intel_opregion *opregion = &i915->display.opregion; + + if (!opregion->header) + return; /* just clear all opregion memory pointers now */ 
memunmap(opregion->header); diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index d02e6696a050..fd2ea8ef0fa2 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -60,6 +60,7 @@ struct intel_opregion { #ifdef CONFIG_ACPI int intel_opregion_setup(struct drm_i915_private *dev_priv); +void intel_opregion_cleanup(struct drm_i915_private *i915); void intel_opregion_register(struct drm_i915_private *dev_priv); void intel_opregion_unregister(struct drm_i915_private *dev_priv); @@ -85,6 +86,10 @@ static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) return 0; } +static inline void intel_opregion_cleanup(struct drm_i915_private *i915) +{ +} + static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 44610b20cd29..31084d95711d 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -2644,3 +2644,302 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state) break; } } + +static void +psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + const char *status = "unknown"; + u32 val, status_val; + + if (intel_dp->psr.psr2_enabled) { + static const char * const live_status[] = { + "IDLE", + "CAPTURE", + "CAPTURE_FS", + "SLEEP", + "BUFON_FW", + "ML_UP", + "SU_STANDBY", + "FAST_SLEEP", + "DEEP_SLEEP", + "BUF_ON", + "TG_ON" + }; + val = intel_de_read(dev_priv, + EDP_PSR2_STATUS(intel_dp->psr.transcoder)); + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; + } else { + static const char * const live_status[] = { + "IDLE", + "SRDONACK", + "SRDENT", + "BUFOFF", + "BUFON", + "AUXACK", + "SRDOFFACK", + "SRDENT_ON", + }; + val = intel_de_read(dev_priv, + EDP_PSR_STATUS(intel_dp->psr.transcoder)); + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; + } + + seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); +} + +static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_psr *psr = &intel_dp->psr; + intel_wakeref_t wakeref; + const char *status; + bool enabled; + u32 val; + + seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); + if (psr->sink_support) + seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); + seq_puts(m, "\n"); + + if (!psr->sink_support) + return 0; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + mutex_lock(&psr->lock); + + if (psr->enabled) + status = psr->psr2_enabled ? 
"PSR2 enabled" : "PSR1 enabled"; + else + status = "disabled"; + seq_printf(m, "PSR mode: %s\n", status); + + if (!psr->enabled) { + seq_printf(m, "PSR sink not reliable: %s\n", + str_yes_no(psr->sink_not_reliable)); + + goto unlock; + } + + if (psr->psr2_enabled) { + val = intel_de_read(dev_priv, + EDP_PSR2_CTL(intel_dp->psr.transcoder)); + enabled = val & EDP_PSR2_ENABLE; + } else { + val = intel_de_read(dev_priv, + EDP_PSR_CTL(intel_dp->psr.transcoder)); + enabled = val & EDP_PSR_ENABLE; + } + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", + str_enabled_disabled(enabled), val); + psr_source_status(intel_dp, m); + seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", + psr->busy_frontbuffer_bits); + + /* + * SKL+ Perf counter is reset to 0 everytime DC state is entered + */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + val = intel_de_read(dev_priv, + EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); + val &= EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance counter: %u\n", val); + } + + if (psr->debug & I915_PSR_DEBUG_IRQ) { + seq_printf(m, "Last attempted entry at: %lld\n", + psr->last_entry_attempt); + seq_printf(m, "Last exit at: %lld\n", psr->last_exit); + } + + if (psr->psr2_enabled) { + u32 su_frames_val[3]; + int frame; + + /* + * Reading all 3 registers before hand to minimize crossing a + * frame boundary between register reads + */ + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { + val = intel_de_read(dev_priv, + PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); + su_frames_val[frame / 3] = val; + } + + seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); + + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { + u32 su_blocks; + + su_blocks = su_frames_val[frame / 3] & + PSR2_SU_STATUS_MASK(frame); + su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); + seq_printf(m, "%d\t%d\n", frame, su_blocks); + } + + seq_printf(m, "PSR2 selective fetch: %s\n", + str_enabled_disabled(psr->psr2_sel_fetch_enabled)); + } + +unlock: + mutex_unlock(&psr->lock); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static int i915_edp_psr_status_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + struct intel_dp *intel_dp = NULL; + struct intel_encoder *encoder; + + if (!HAS_PSR(dev_priv)) + return -ENODEV; + + /* Find the first EDP which supports PSR */ + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + intel_dp = enc_to_intel_dp(encoder); + break; + } + + if (!intel_dp) + return -ENODEV; + + return intel_psr_status(m, intel_dp); +} +DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status); + +static int +i915_edp_psr_debug_set(void *data, u64 val) +{ + struct drm_i915_private *dev_priv = data; + struct intel_encoder *encoder; + intel_wakeref_t wakeref; + int ret = -ENODEV; + + if (!HAS_PSR(dev_priv)) + return ret; + + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + // TODO: split to each transcoder's PSR debug state + ret = intel_psr_debug_set(intel_dp, val); + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + } + + return ret; +} + +static int +i915_edp_psr_debug_get(void *data, u64 *val) +{ + struct drm_i915_private *dev_priv = data; + struct intel_encoder *encoder; + + if (!HAS_PSR(dev_priv)) + return -ENODEV; + + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = 
enc_to_intel_dp(encoder); + + // TODO: split to each transcoder's PSR debug state + *val = READ_ONCE(intel_dp->psr.debug); + return 0; + } + + return -ENODEV; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, + i915_edp_psr_debug_get, i915_edp_psr_debug_set, + "%llu\n"); + +void intel_psr_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root, + i915, &i915_edp_psr_debug_fops); + + debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root, + i915, &i915_edp_psr_status_fops); +} + +static int i915_psr_sink_status_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + struct intel_dp *intel_dp = intel_attached_dp(connector); + static const char * const sink_status[] = { + "inactive", + "transition to active, capture and display", + "active, display from RFB", + "active, capture and display on sink device timings", + "transition to inactive, capture and display, timing re-sync", + "reserved", + "reserved", + "sink internal error", + }; + const char *str; + int ret; + u8 val; + + if (!CAN_PSR(intel_dp)) { + seq_puts(m, "PSR Unsupported\n"); + return -ENODEV; + } + + if (connector->base.status != connector_status_connected) + return -ENODEV; + + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); + if (ret != 1) + return ret < 0 ? ret : -EIO; + + val &= DP_PSR_SINK_STATE_MASK; + if (val < ARRAY_SIZE(sink_status)) + str = sink_status[val]; + else + str = "unknown"; + + seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); + +static int i915_psr_status_show(struct seq_file *m, void *data) +{ + struct intel_connector *connector = m->private; + struct intel_dp *intel_dp = intel_attached_dp(connector); + + return intel_psr_status(m, intel_dp); +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_status); + +void intel_psr_connector_debugfs_add(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct dentry *root = connector->base.debugfs_entry; + + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) + return; + + debugfs_create_file("i915_psr_sink_status", 0444, root, + connector, &i915_psr_sink_status_fops); + + if (HAS_PSR(i915)) + debugfs_create_file("i915_psr_status", 0444, root, + connector, &i915_psr_status_fops); +} diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 7a38a9e7fa5b..0b95e8aa615f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -13,6 +13,7 @@ struct drm_connector; struct drm_connector_state; struct drm_i915_private; struct intel_atomic_state; +struct intel_connector; struct intel_crtc; struct intel_crtc_state; struct intel_dp; @@ -61,5 +62,7 @@ void intel_psr_resume(struct intel_dp *intel_dp); void intel_psr_lock(const struct intel_crtc_state *crtc_state); void intel_psr_unlock(const struct intel_crtc_state *crtc_state); +void intel_psr_connector_debugfs_add(struct intel_connector *connector); +void intel_psr_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_PSR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index a16e56a60c30..25034bbf1445 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -32,85 +32,20 @@ #include <linux/string_helpers.h> -#include 
<drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_color_mgmt.h> -#include <drm/drm_crtc.h> -#include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_rect.h> #include "i915_drv.h" #include "i915_reg.h" -#include "i915_vgpu.h" #include "i9xx_plane.h" #include "intel_atomic_plane.h" -#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" -#include "intel_frontbuffer.h" #include "intel_sprite.h" -#include "intel_vrr.h" - -int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) -{ - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_rect *src = &plane_state->uapi.src; - u32 src_x, src_y, src_w, src_h, hsub, vsub; - bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); - - /* - * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS - * abuses hsub/vsub so we can't use them here. But as they - * are limited to 32bpp RGB formats we don't actually need - * to check anything. - */ - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) - return 0; - - /* - * Hardware doesn't handle subpixel coordinates. - * Adjust to (macro)pixel boundary, but be careful not to - * increase the source viewport size, because that could - * push the downscaling factor out of bounds. - */ - src_x = src->x1 >> 16; - src_w = drm_rect_width(src) >> 16; - src_y = src->y1 >> 16; - src_h = drm_rect_height(src) >> 16; - - drm_rect_init(src, src_x << 16, src_y << 16, - src_w << 16, src_h << 16); - - if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { - hsub = 2; - vsub = 2; - } else { - hsub = fb->format->hsub; - vsub = fb->format->vsub; - } - - if (rotated) - hsub = vsub = max(hsub, vsub); - - if (src_x % hsub || src_w % hsub) { - drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_x, src_w, hsub, str_yes_no(rotated)); - return -EINVAL; - } - - if (src_y % vsub || src_h % vsub) { - drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_y, src_h, vsub, str_yes_no(rotated)); - return -EINVAL; - } - - return 0; -} static void i9xx_plane_linear_gamma(u16 gamma[8]) { @@ -1449,124 +1384,6 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, return 0; } -static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) -{ - return DISPLAY_VER(dev_priv) >= 9; -} - -static void intel_plane_set_ckey(struct intel_plane_state *plane_state, - const struct drm_intel_sprite_colorkey *set) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - - *key = *set; - - /* - * We want src key enabled on the - * sprite and not on the primary. - */ - if (plane->id == PLANE_PRIMARY && - set->flags & I915_SET_COLORKEY_SOURCE) - key->flags = 0; - - /* - * On SKL+ we want dst key enabled on - * the primary and not on the sprite. 
- */ - if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY && - set->flags & I915_SET_COLORKEY_DESTINATION) - key->flags = 0; -} - -int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_intel_sprite_colorkey *set = data; - struct drm_plane *plane; - struct drm_plane_state *plane_state; - struct drm_atomic_state *state; - struct drm_modeset_acquire_ctx ctx; - int ret = 0; - - /* ignore the pointless "none" flag */ - set->flags &= ~I915_SET_COLORKEY_NONE; - - if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) - return -EINVAL; - - /* Make sure we don't try to enable both src & dest simultaneously */ - if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) - return -EINVAL; - - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - set->flags & I915_SET_COLORKEY_DESTINATION) - return -EINVAL; - - plane = drm_plane_find(dev, file_priv, set->plane_id); - if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) - return -ENOENT; - - /* - * SKL+ only plane 2 can do destination keying against plane 1. - * Also multiple planes can't do destination keying on the same - * pipe simultaneously. - */ - if (DISPLAY_VER(dev_priv) >= 9 && - to_intel_plane(plane)->id >= PLANE_SPRITE1 && - set->flags & I915_SET_COLORKEY_DESTINATION) - return -EINVAL; - - drm_modeset_acquire_init(&ctx, 0); - - state = drm_atomic_state_alloc(plane->dev); - if (!state) { - ret = -ENOMEM; - goto out; - } - state->acquire_ctx = &ctx; - - while (1) { - plane_state = drm_atomic_get_plane_state(state, plane); - ret = PTR_ERR_OR_ZERO(plane_state); - if (!ret) - intel_plane_set_ckey(to_intel_plane_state(plane_state), set); - - /* - * On some platforms we have to configure - * the dst colorkey on the primary plane. 
- */ - if (!ret && has_dst_key_in_primary_plane(dev_priv)) { - struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, - to_intel_plane(plane)->pipe); - - plane_state = drm_atomic_get_plane_state(state, - crtc->base.primary); - ret = PTR_ERR_OR_ZERO(plane_state); - if (!ret) - intel_plane_set_ckey(to_intel_plane_state(plane_state), set); - } - - if (!ret) - ret = drm_atomic_commit(state); - - if (ret != -EDEADLK) - break; - - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - } - - drm_atomic_state_put(state); -out: - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - return ret; -} - static const u32 g4x_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c new file mode 100644 index 000000000000..70a391083751 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_crtc.h" +#include "intel_display_types.h" +#include "intel_sprite_uapi.h" + +static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) +{ + return DISPLAY_VER(dev_priv) >= 9; +} + +static void intel_plane_set_ckey(struct intel_plane_state *plane_state, + const struct drm_intel_sprite_colorkey *set) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + + *key = *set; + + /* + * We want src key enabled on the + * sprite and not on the primary. + */ + if (plane->id == PLANE_PRIMARY && + set->flags & I915_SET_COLORKEY_SOURCE) + key->flags = 0; + + /* + * On SKL+ we want dst key enabled on + * the primary and not on the sprite. + */ + if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY && + set->flags & I915_SET_COLORKEY_DESTINATION) + key->flags = 0; +} + +int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_intel_sprite_colorkey *set = data; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_atomic_state *state; + struct drm_modeset_acquire_ctx ctx; + int ret = 0; + + /* ignore the pointless "none" flag */ + set->flags &= ~I915_SET_COLORKEY_NONE; + + if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) + return -EINVAL; + + /* Make sure we don't try to enable both src & dest simultaneously */ + if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) + return -EINVAL; + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + set->flags & I915_SET_COLORKEY_DESTINATION) + return -EINVAL; + + plane = drm_plane_find(dev, file_priv, set->plane_id); + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) + return -ENOENT; + + /* + * SKL+ only plane 2 can do destination keying against plane 1. + * Also multiple planes can't do destination keying on the same + * pipe simultaneously. 
+ */ + if (DISPLAY_VER(dev_priv) >= 9 && + to_intel_plane(plane)->id >= PLANE_SPRITE1 && + set->flags & I915_SET_COLORKEY_DESTINATION) + return -EINVAL; + + drm_modeset_acquire_init(&ctx, 0); + + state = drm_atomic_state_alloc(plane->dev); + if (!state) { + ret = -ENOMEM; + goto out; + } + state->acquire_ctx = &ctx; + + while (1) { + plane_state = drm_atomic_get_plane_state(state, plane); + ret = PTR_ERR_OR_ZERO(plane_state); + if (!ret) + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); + + /* + * On some platforms we have to configure + * the dst colorkey on the primary plane. + */ + if (!ret && has_dst_key_in_primary_plane(dev_priv)) { + struct intel_crtc *crtc = + intel_crtc_for_pipe(dev_priv, + to_intel_plane(plane)->pipe); + + plane_state = drm_atomic_get_plane_state(state, + crtc->base.primary); + ret = PTR_ERR_OR_ZERO(plane_state); + if (!ret) + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); + } + + if (!ret) + ret = drm_atomic_commit(state); + + if (ret != -EDEADLK) + break; + + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + } + + drm_atomic_state_put(state); +out: + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; +} diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.h b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h new file mode 100644 index 000000000000..3eb50025acaf --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_SPRITE_UAPI_H__ +#define __INTEL_SPRITE_UAPI_H__ + +struct drm_device; +struct drm_file; + +int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* __INTEL_SPRITE_UAPI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f45328712bff..bd8c9df5f98f 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_ddi.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_power_map.h" @@ -118,6 +119,24 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port) drm_WARN_ON(&i915->drm, !enabled); } +static enum intel_display_power_domain +tc_port_power_domain(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + + return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; +} + +static void +assert_tc_port_power_enabled(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + drm_WARN_ON(&i915->drm, + !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port))); +} + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -418,9 +437,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port) val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, - "Port %s: PHY in TCCOLD, assume safe mode\n", + "Port %s: PHY in TCCOLD, assume not owned\n", dig_port->tc_port_name); - return true; + return false; } return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); @@ -464,7 +483,8 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port, u32 
live_status_mask; int max_lanes; - if (!tc_phy_status_complete(dig_port)) { + if (!tc_phy_status_complete(dig_port) && + !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) { drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", dig_port->tc_port_name); goto out_set_tbt_alt_mode; @@ -539,62 +559,171 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) } } -static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) +static bool tc_phy_is_ready_and_owned(struct intel_digital_port *dig_port, + bool phy_is_ready, bool phy_is_owned) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - if (!tc_phy_status_complete(dig_port)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n", - dig_port->tc_port_name); - return dig_port->tc_mode == TC_PORT_TBT_ALT; - } + drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); + + return phy_is_ready && phy_is_owned; +} - /* On ADL-P the PHY complete flag is set in TBT mode as well. */ - if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT) - return true; +static bool tc_phy_is_connected(struct intel_digital_port *dig_port, + enum icl_port_dpll_id port_pll_type) +{ + struct intel_encoder *encoder = &dig_port->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + bool phy_is_ready = tc_phy_status_complete(dig_port); + bool phy_is_owned = tc_phy_is_owned(dig_port); + bool is_connected; - if (!tc_phy_is_owned(dig_port)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n", - dig_port->tc_port_name); + if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) + is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; + else + is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; - return false; + drm_dbg_kms(&i915->drm, + "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", + dig_port->tc_port_name, + str_yes_no(is_connected), + str_yes_no(phy_is_ready), + str_yes_no(phy_is_owned), + port_pll_type == ICL_PORT_DPLL_DEFAULT ? 
"tbt" : "non-tbt"); + + return is_connected; +} + +static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + if (wait_for(tc_phy_status_complete(dig_port), 100)) + drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", + dig_port->tc_port_name); +} + +static enum tc_port_mode +hpd_mask_to_tc_mode(u32 live_status_mask) +{ + if (live_status_mask) + return fls(live_status_mask) - 1; + + return TC_PORT_DISCONNECTED; +} + +static enum tc_port_mode +tc_phy_hpd_live_mode(struct intel_digital_port *dig_port) +{ + u32 live_status_mask = tc_port_live_status_mask(dig_port); + + return hpd_mask_to_tc_mode(live_status_mask); +} + +static enum tc_port_mode +get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port, + enum tc_port_mode live_mode) +{ + switch (live_mode) { + case TC_PORT_LEGACY: + case TC_PORT_DP_ALT: + return live_mode; + default: + MISSING_CASE(live_mode); + fallthrough; + case TC_PORT_TBT_ALT: + case TC_PORT_DISCONNECTED: + if (dig_port->tc_legacy_port) + return TC_PORT_LEGACY; + else + return TC_PORT_DP_ALT; } +} - return dig_port->tc_mode == TC_PORT_DP_ALT || - dig_port->tc_mode == TC_PORT_LEGACY; +static enum tc_port_mode +get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port, + enum tc_port_mode live_mode) +{ + switch (live_mode) { + case TC_PORT_LEGACY: + return TC_PORT_DISCONNECTED; + case TC_PORT_DP_ALT: + case TC_PORT_TBT_ALT: + return TC_PORT_TBT_ALT; + default: + MISSING_CASE(live_mode); + fallthrough; + case TC_PORT_DISCONNECTED: + if (dig_port->tc_legacy_port) + return TC_PORT_DISCONNECTED; + else + return TC_PORT_TBT_ALT; + } } static enum tc_port_mode intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - u32 live_status_mask = tc_port_live_status_mask(dig_port); + enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port); + bool phy_is_ready; + bool phy_is_owned; enum tc_port_mode mode; - if (!tc_phy_is_owned(dig_port) || - drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port))) - return TC_PORT_TBT_ALT; + /* + * For legacy ports the IOM firmware initializes the PHY during boot-up + * and system resume whether or not a sink is connected. Wait here for + * the initialization to get ready. + */ + if (dig_port->tc_legacy_port) + tc_phy_wait_for_ready(dig_port); - mode = dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : TC_PORT_DP_ALT; - if (live_status_mask) { - enum tc_port_mode live_mode = fls(live_status_mask) - 1; + phy_is_ready = tc_phy_status_complete(dig_port); + phy_is_owned = tc_phy_is_owned(dig_port); - if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT)) - mode = live_mode; + if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) { + mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode); + } else { + drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); + mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode); } + drm_dbg_kms(&i915->drm, + "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", + dig_port->tc_port_name, + tc_port_mode_name(mode), + str_yes_no(phy_is_ready), + str_yes_no(phy_is_owned), + tc_port_mode_name(live_mode)); + return mode; } +static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port) +{ + if (dig_port->tc_legacy_port) + return TC_PORT_LEGACY; + + return TC_PORT_TBT_ALT; +} + +static enum tc_port_mode +hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask) +{ + enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask); + + if (mode != TC_PORT_DISCONNECTED) + return mode; + + return default_tc_mode(dig_port); +} + static enum tc_port_mode intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) { u32 live_status_mask = tc_port_live_status_mask(dig_port); - if (live_status_mask) - return fls(live_status_mask) - 1; - - return TC_PORT_TBT_ALT; + return hpd_mask_to_target_mode(dig_port, live_status_mask); } static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, @@ -660,11 +789,24 @@ static void intel_tc_port_update_mode(struct intel_digital_port *dig_port, tc_cold_unblock(dig_port, domain, wref); } -static void -intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, - int refcount) +static void __intel_tc_port_get_link(struct intel_digital_port *dig_port) +{ + dig_port->tc_link_refcount++; +} + +static void __intel_tc_port_put_link(struct intel_digital_port *dig_port) +{ + dig_port->tc_link_refcount--; +} + +static bool tc_port_is_enabled(struct intel_digital_port *dig_port) { - dig_port->tc_link_refcount = refcount; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + assert_tc_port_power_enabled(dig_port); + + return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & + DDI_BUF_CTL_ENABLE; } /** @@ -679,6 +821,7 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t tc_cold_wref; enum intel_display_power_domain domain; + bool update_mode = false; mutex_lock(&dig_port->tc_lock); @@ -689,63 +832,105 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) tc_cold_wref = tc_cold_block(dig_port, &domain); dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + /* + * Save the initial mode for the state check in + * intel_tc_port_sanitize_mode(). + */ + dig_port->tc_init_mode = dig_port->tc_mode; + if (dig_port->tc_mode != TC_PORT_DISCONNECTED) + dig_port->tc_lock_wakeref = + tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); + + /* + * The PHY needs to be connected for AUX to work during HW readout and + * MST topology resume, but the PHY mode can only be changed if the + * port is disabled. + * + * An exception is the case where BIOS leaves the PHY incorrectly + * disconnected on an enabled legacy port. Work around that by + * connecting the PHY even though the port is enabled. 
This doesn't + * cause a problem as the PHY ownership state is ignored by the + * IOM/TCSS firmware (only display can own the PHY in that case). + */ + if (!tc_port_is_enabled(dig_port)) { + update_mode = true; + } else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) { + drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port); + drm_err(&i915->drm, + "Port %s: PHY disconnected on enabled port, connecting it\n", + dig_port->tc_port_name); + update_mode = true; + } + + if (update_mode) + intel_tc_port_update_mode(dig_port, 1, false); + /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */ - intel_tc_port_link_init_refcount(dig_port, 1); - dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); + __intel_tc_port_get_link(dig_port); tc_cold_unblock(dig_port, domain, tc_cold_wref); - drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n", - dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); - mutex_unlock(&dig_port->tc_lock); } +static bool tc_port_has_active_links(struct intel_digital_port *dig_port, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; + int active_links = 0; + + if (dig_port->dp.is_mst) { + /* TODO: get the PLL type for MST, once HW readout is done for it. */ + active_links = intel_dp_mst_encoder_active_links(dig_port); + } else if (crtc_state && crtc_state->hw.active) { + pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state); + active_links = 1; + } + + if (active_links && !tc_phy_is_connected(dig_port, pll_type)) + drm_err(&i915->drm, + "Port %s: PHY disconnected with %d active link(s)\n", + dig_port->tc_port_name, active_links); + + return active_links; +} + /** * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode * @dig_port: digital port + * @crtc_state: atomic state of CRTC connected to @dig_port * * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver * loading and system resume: * If the encoder is enabled keep the TypeC mode/PHY connected state locked until * the encoder is disabled. * If the encoder is disabled make sure the PHY is disconnected. + * @crtc_state is valid if @dig_port is enabled, NULL otherwise. */ -void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port) +void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_encoder *encoder = &dig_port->base; - int active_links = 0; mutex_lock(&dig_port->tc_lock); - if (dig_port->dp.is_mst) - active_links = intel_dp_mst_encoder_active_links(dig_port); - else if (encoder->base.crtc) - active_links = to_intel_crtc(encoder->base.crtc)->active; - drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1); - intel_tc_port_link_init_refcount(dig_port, active_links); - - if (active_links) { - if (!icl_tc_phy_is_connected(dig_port)) - drm_dbg_kms(&i915->drm, - "Port %s: PHY disconnected with %d active link(s)\n", - dig_port->tc_port_name, active_links); - } else { + if (!tc_port_has_active_links(dig_port, crtc_state)) { /* * TBT-alt is the default mode in any case the PHY ownership is not * held (regardless of the sink's connected live state), so * we'll just switch to disconnected mode from it here without * a note. 
*/ - if (dig_port->tc_mode != TC_PORT_TBT_ALT) + if (dig_port->tc_init_mode != TC_PORT_TBT_ALT && + dig_port->tc_init_mode != TC_PORT_DISCONNECTED) drm_dbg_kms(&i915->drm, "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); + tc_port_mode_name(dig_port->tc_init_mode)); icl_tc_phy_disconnect(dig_port); + __intel_tc_port_put_link(dig_port); tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, fetch_and_zero(&dig_port->tc_lock_wakeref)); @@ -768,16 +953,23 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port) * connected ports are usable, and avoids exposing to the users objects they * can't really use. */ +bool intel_tc_port_connected_locked(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); + + return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); +} + bool intel_tc_port_connected(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_connected; intel_tc_port_lock(dig_port); - - is_connected = tc_port_live_status_mask(dig_port) & - BIT(dig_port->tc_mode); - + is_connected = intel_tc_port_connected_locked(encoder); intel_tc_port_unlock(dig_port); return is_connected; @@ -857,14 +1049,14 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { __intel_tc_port_lock(dig_port, required_lanes); - dig_port->tc_link_refcount++; + __intel_tc_port_get_link(dig_port); intel_tc_port_unlock(dig_port); } void intel_tc_port_put_link(struct intel_digital_port *dig_port) { intel_tc_port_lock(dig_port); - --dig_port->tc_link_refcount; + __intel_tc_port_put_link(dig_port); intel_tc_port_unlock(dig_port); /* diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index d54082e2d5e8..79667d977508 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -9,6 +9,7 @@ #include <linux/mutex.h> #include <linux/types.h> +struct intel_crtc_state; struct intel_digital_port; struct intel_encoder; @@ -17,6 +18,7 @@ bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port); bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_encoder *encoder); +bool intel_tc_port_connected_locked(struct intel_encoder *encoder); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); @@ -25,7 +27,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_init_mode(struct intel_digital_port *dig_port); -void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port); +void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, + const struct intel_crtc_state *crtc_state); void intel_tc_port_lock(struct intel_digital_port *dig_port); void intel_tc_port_unlock(struct intel_digital_port *dig_port); void intel_tc_port_flush_work(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 571f5dda1e66..f8bf9810527d 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -8,6 +8,7 @@ #include "intel_de.h" 
#include "intel_display_types.h" #include "intel_vblank.h" +#include "intel_vrr.h" /* * This timing diagram depicts the video signal in and @@ -439,3 +440,94 @@ void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) { wait_for_pipe_scanline_moving(crtc, true); } + +static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + /* + * The scanline counter increments at the leading edge of hsync. + * + * On most platforms it starts counting from vtotal-1 on the + * first active line. That means the scanline counter value is + * always one less than what we would expect. Ie. just after + * start of vblank, which also occurs at start of hsync (on the + * last active line), the scanline counter will read vblank_start-1. + * + * On gen2 the scanline counter starts counting from 1 instead + * of vtotal-1, so we have to subtract one (or rather add vtotal-1 + * to keep the value positive), instead of adding one. + * + * On HSW+ the behaviour of the scanline counter depends on the output + * type. For DP ports it behaves like most other platforms, but on HDMI + * there's an extra 1 line difference. So we need to add two instead of + * one to the value. + * + * On VLV/CHV DSI the scanline counter would appear to increment + * approx. 1/3 of a scanline before start of vblank. Unfortunately + * that means we can't tell whether we're in vblank or not while + * we're on that particular line. We must still set scanline_offset + * to 1 so that the vblank timestamps come out correct when we query + * the scanline counter from within the vblank interrupt handler. + * However if queried just before the start of vblank we'll get an + * answer that's slightly in the future. + */ + if (DISPLAY_VER(i915) == 2) { + int vtotal; + + vtotal = adjusted_mode->crtc_vtotal; + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + vtotal /= 2; + + return vtotal - 1; + } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + return 2; + } else { + return 1; + } +} + +void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct drm_display_mode adjusted_mode; + int vmax_vblank_start = 0; + unsigned long irqflags; + + drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); + + if (crtc_state->vrr.enable) { + adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; + adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; + adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); + vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); + } + + /* + * Belts and suspenders locking to guarantee everyone sees 100% + * consistent state during fastset seamless refresh rate changes. + * + * vblank_time_lock takes care of all drm_vblank.c stuff, and + * uncore.lock takes care of __intel_get_crtc_scanline() which + * may get called elsewhere as well. + * + * TODO maybe just protect everything (including + * __intel_get_crtc_scanline()) with vblank_time_lock? + * Need to audit everything to make sure it's safe. 
+ */ + spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags); + spin_lock(&i915->uncore.lock); + + drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); + + crtc->vmax_vblank_start = vmax_vblank_start; + + crtc->mode_flags = crtc_state->mode_flags; + + crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); + + spin_unlock(&i915->uncore.lock); + spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); +} diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h index c9fea2c2a990..0884db7e76ae 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.h +++ b/drivers/gpu/drm/i915/display/intel_vblank.h @@ -11,6 +11,7 @@ struct drm_crtc; struct intel_crtc; +struct intel_crtc_state; u32 i915_get_vblank_counter(struct drm_crtc *crtc); u32 g4x_get_vblank_counter(struct drm_crtc *crtc); @@ -19,5 +20,6 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, int intel_get_crtc_scanline(struct intel_crtc *crtc); void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc); void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc); +void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VBLANK_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index ce55b8f09301..fd0065a46ec5 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -17,7 +17,6 @@ #include "intel_fb.h" #include "intel_fbc.h" #include "intel_psr.h" -#include "intel_sprite.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index f0af997d2a23..50a9a6adbe32 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -12,6 +12,7 @@ #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" @@ -704,6 +705,28 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */); +static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, + const struct skl_wm_params *wp) +{ + unsigned int latency = i915->display.wm.skl_latency[level]; + + if (latency == 0) + return 0; + + /* + * WaIncreaseLatencyIPCEnabled: kbl,cfl + * Display WA #1141: kbl,cfl + */ + if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && + skl_watermark_ipc_enabled(i915)) + latency += 4; + + if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled) + latency += 15; + + return latency; +} + static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) @@ -723,7 +746,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, drm_WARN_ON(&i915->drm, ret); for (level = 0; level < i915->display.wm.num_levels; level++) { - unsigned int latency = i915->display.wm.skl_latency[level]; + unsigned int latency = skl_wm_latency(i915, level, &wp); skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) @@ -1839,17 +1862,6 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, return; } - /* - * WaIncreaseLatencyIPCEnabled: kbl,cfl - * Display WA 
#1141: kbl,cfl - */ - if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && - skl_watermark_ipc_enabled(i915)) - latency += 4; - - if (skl_needs_memory_bw_wa(i915) && wp->x_tiled) - latency += 15; - method1 = skl_wm_method1(i915, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, @@ -1976,7 +1988,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, for (level = 0; level < i915->display.wm.num_levels; level++) { struct skl_wm_level *result = &levels[level]; - unsigned int latency = i915->display.wm.skl_latency[level]; + unsigned int latency = skl_wm_latency(i915, level, wm_params); skl_compute_plane_wm(crtc_state, plane, level, latency, wm_params, result_prev, result); @@ -1996,7 +2008,8 @@ static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, unsigned int latency = 0; if (i915->display.sagv.block_time_us) - latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0]; + latency = i915->display.sagv.block_time_us + + skl_wm_latency(i915, 0, wm_params); skl_compute_plane_wm(crtc_state, plane, 0, latency, wm_params, &levels[0], @@ -2188,6 +2201,119 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; } +static bool +skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state, + int wm0_lines, int latency) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + /* FIXME missing scaler and DSC pre-fill time */ + return crtc_state->framestart_delay + + intel_usecs_to_scanlines(adjusted_mode, latency) + + wm0_lines > + adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start; +} + +static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum plane_id plane_id; + int wm0_lines = 0; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; + + /* FIXME what about !skl_wm_has_lines() platforms? */ + wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines); + } + + return wm0_lines; +} + +static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, + int wm0_lines) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + int level; + + for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { + int latency; + + /* FIXME should we care about the latency w/a's? */ + latency = skl_wm_latency(i915, level, NULL); + if (latency == 0) + continue; + + /* FIXME is it correct to use 0 latency for wm0 here? */ + if (level == 0) + latency = 0; + + if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency)) + return level; + } + + return -EINVAL; +} + +static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + int wm0_lines, level; + + if (!crtc_state->hw.active) + return 0; + + wm0_lines = skl_max_wm0_lines(crtc_state); + + level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines); + if (level < 0) + return level; + + /* + * FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* + * based on whether we're limited by the vblank duration. + * + * FIXME also related to skl+ w/a 1136 (also unimplemented as of + * now) perhaps? 
+ */ + + for (level++; level < i915->display.wm.num_levels; level++) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + /* + * FIXME just clear enable or flag the entire + * thing as bad via min_ddb_alloc=U16_MAX? + */ + wm->wm[level].enable = false; + wm->uv_wm[level].enable = false; + } + } + + if (DISPLAY_VER(i915) >= 12 && + i915->display.sagv.block_time_us && + skl_is_vblank_too_short(crtc_state, wm0_lines, + i915->display.sagv.block_time_us)) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + wm->sagv.wm0.enable = false; + wm->sagv.trans_wm.enable = false; + } + } + + return 0; +} + static int skl_build_pipe_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2217,7 +2343,7 @@ static int skl_build_pipe_wm(struct intel_atomic_state *state, crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; - return 0; + return skl_wm_check_vblank(crtc_state); } static void skl_ddb_entry_write(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 8d2e6e151ba0..028965ab442d 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1072,7 +1072,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); - pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); + pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); /* Enable Frame time stamo based scanline reporting */ pipe_config->mode_flags |= diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index e10507fa71ce..5d143e2a8db0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -440,6 +440,8 @@ #define GSC_FW_LOAD GSC_INSTR(1, 0, 2) #define HECI1_FW_LIMIT_VALID (1 << 31) +#define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6) + /* * Used to convert any address to canonical form. * Starting from gen8, some commands (e.g. 
STATE_BASE_ADDRESS, diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 256cecf99dd1..7a008e829d4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -736,12 +736,12 @@ int intel_gt_init(struct intel_gt *gt) if (err) goto err_gt; - intel_uc_init_late(>->uc); - err = i915_inject_probe_error(gt->i915, -EIO); if (err) goto err_gt; + intel_uc_init_late(>->uc); + intel_migrate_init(>->migrate, gt); goto out_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 83df4cd5e06c..80dbbef86b1d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data) } DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get, - perf_limit_reasons_clear, "%llu\n"); + perf_limit_reasons_clear, "0x%llx\n"); void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c new file mode 100644 index 000000000000..ea0da06e2f39 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" +#include "gt/intel_gt.h" +#include "gt/intel_ring.h" +#include "intel_gsc_uc_heci_cmd_submit.h" + +struct gsc_heci_pkt { + u64 addr_in; + u32 size_in; + u64 addr_out; + u32 size_out; +}; + +static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = GSC_HECI_CMD_PKT; + *cs++ = lower_32_bits(pkt->addr_in); + *cs++ = upper_32_bits(pkt->addr_in); + *cs++ = pkt->size_in; + *cs++ = lower_32_bits(pkt->addr_out); + *cs++ = upper_32_bits(pkt->addr_out); + *cs++ = pkt->size_out; + *cs++ = 0; + + intel_ring_advance(rq, cs); + + return 0; +} + +int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in, + u32 size_in, u64 addr_out, + u32 size_out) +{ + struct intel_context *ce = gsc->ce; + struct i915_request *rq; + struct gsc_heci_pkt pkt = { + .addr_in = addr_in, + .size_in = size_in, + .addr_out = addr_out, + .size_out = size_out + }; + int err; + + if (!ce) + return -ENODEV; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + if (ce->engine->emit_init_breadcrumb) { + err = ce->engine->emit_init_breadcrumb(rq); + if (err) + goto out_rq; + } + + err = emit_gsc_heci_pkt(rq, &pkt); + + if (err) + goto out_rq; + + err = ce->engine->emit_flush(rq, 0); + +out_rq: + i915_request_get(rq); + + if (unlikely(err)) + i915_request_set_error_once(rq, err); + + i915_request_add(rq); + + if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0) + err = -ETIME; + + i915_request_put(rq); + + if (err) + drm_err(&gsc_uc_to_gt(gsc)->i915->drm, + "Request submission for GSC heci cmd failed (%d)\n", + err); + + return err; +} + +void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header, + u8 heci_client_id, u32 message_size, + u64 host_session_id) +{ + host_session_id &= ~HOST_SESSION_MASK; + if (heci_client_id == HECI_MEADDRESS_PXP) + host_session_id |= HOST_SESSION_PXP_SINGLE; + + header->validity_marker = GSC_HECI_VALIDITY_MARKER; + header->heci_client_id = heci_client_id; + 
header->host_session_handle = host_session_id; + header->header_version = MTL_GSC_HEADER_VERSION; + header->message_size = message_size; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h new file mode 100644 index 000000000000..3d56ae501991 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_ +#define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_ + +#include <linux/types.h> + +struct intel_gsc_uc; +struct intel_gsc_mtl_header { + u32 validity_marker; +#define GSC_HECI_VALIDITY_MARKER 0xA578875A + + u8 heci_client_id; +#define HECI_MEADDRESS_PXP 17 +#define HECI_MEADDRESS_HDCP 18 + + u8 reserved1; + + u16 header_version; +#define MTL_GSC_HEADER_VERSION 1 + + /* + * FW allows host to decide host_session handle + * as it sees fit. + * For intertracebility reserving select bits(60-63) + * to differentiate caller-target subsystem + * 0000 - HDCP + * 0001 - PXP Single Session + */ + u64 host_session_handle; +#define HOST_SESSION_MASK REG_GENMASK64(63, 60) +#define HOST_SESSION_PXP_SINGLE BIT_ULL(60) + u64 gsc_message_handle; + + u32 message_size; /* lower 20 bits only, upper 12 are reserved */ + + /* + * Flags mask: + * Bit 0: Pending + * Bit 1: Session Cleanup; + * Bits 2-15: Flags + * Bits 16-31: Extension Size + * According to internal spec flags are either input or output + * we distinguish the flags using OUTFLAG or INFLAG + */ + u32 flags; +#define GSC_OUTFLAG_MSG_PENDING 1 + + u32 status; +} __packed; + +int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, + u64 addr_in, u32 size_in, + u64 addr_out, u32 size_out); +void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header, + u8 heci_client_id, u32 message_size, + u64 host_session_id); +#endif diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index a53fd339e2cc..da249337c23b 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -535,7 +535,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) ret = i915_pcode_init(dev_priv); if (ret) - goto err_msi; + goto err_opregion; /* * Fill the dram structure to get the system dram info. 
This will be @@ -556,6 +556,8 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) return 0; +err_opregion: + intel_opregion_cleanup(dev_priv); err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); @@ -581,6 +583,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) i915_perf_fini(dev_priv); + intel_opregion_cleanup(dev_priv); + if (pdev->msi_enabled) pci_disable_msi(pdev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 34b7d2ac33fb..d22ffd7a32dc 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1794,9 +1794,11 @@ * GEN9 clock gating regs */ #define GEN9_CLKGATE_DIS_0 _MMIO(0x46530) -#define DARBF_GATING_DIS (1 << 27) -#define PWM2_GATING_DIS (1 << 14) -#define PWM1_GATING_DIS (1 << 13) +#define DARBF_GATING_DIS REG_BIT(27) +#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15) +#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14) +#define PWM2_GATING_DIS REG_BIT(14) +#define PWM1_GATING_DIS REG_BIT(13) #define GEN9_CLKGATE_DIS_3 _MMIO(0x46538) #define TGL_VRH_GATING_DIS REG_BIT(31) @@ -3495,36 +3497,38 @@ #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ -#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ -#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ -#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) -#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ +#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ +#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ +#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ +#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) +#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ /* * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with * valid values of: 6, 8, 10 BPC. * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of: * 6, 8, 10, 12 BPC. 
*/ -#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5) -#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0) -#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1) -#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2) -#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */ -#define PIPEMISC_DITHER_ENABLE REG_BIT(4) -#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) -#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0) -#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1) -#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2) -#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3) -#define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) +#define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5) +#define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0) +#define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1) +#define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2) +#define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */ +#define PIPE_MISC_DITHER_ENABLE REG_BIT(4) +#define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0) +#define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1) +#define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2) +#define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3) +#define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) #define _PIPE_MISC2_A 0x7002C #define _PIPE_MISC2_B 0x7102C #define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) -#define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A) +#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */ +#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id)) +#define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) /* Skylake+ pipe bottom (background) color */ #define _SKL_BOTTOM_COLOR_A 0x70034 @@ -4390,6 +4394,7 @@ #define SP_CONST_ALPHA_ENABLE REG_BIT(31) #define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) #define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) +#define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac) #define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) #define SP_CONTRAST_MASK REG_GENMASK(26, 18) #define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ @@ -4413,6 +4418,7 @@ #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) +#define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac) #define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) #define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) @@ -4433,6 +4439,7 @@ #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) +#define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE) #define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), 
_SPACLRC0, _SPBCLRC0) #define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) #define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ @@ -4584,10 +4591,13 @@ #define _PLANE_KEYVAL_2_A 0x70294 #define _PLANE_KEYMSK_1_A 0x70198 #define _PLANE_KEYMSK_2_A 0x70298 -#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31) +#define PLANE_KEYMSK_ALPHA_ENABLE REG_BIT(31) #define _PLANE_KEYMAX_1_A 0x701a0 #define _PLANE_KEYMAX_2_A 0x702a0 -#define PLANE_KEYMAX_ALPHA(a) ((a) << 24) +#define PLANE_KEYMAX_ALPHA_MASK REG_GENMASK(31, 24) +#define PLANE_KEYMAX_ALPHA(a) REG_FIELD_PREP(PLANE_KEYMAX_ALPHA_MASK, (a)) +#define _PLANE_SURFLIVE_1_A 0x701ac +#define _PLANE_SURFLIVE_2_A 0x702ac #define _PLANE_CC_VAL_1_A 0x701b4 #define _PLANE_CC_VAL_2_A 0x702b4 #define _PLANE_AUX_DIST_1_A 0x701c0 @@ -4772,6 +4782,13 @@ #define PLANE_KEYMAX(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) +#define _PLANE_SURFLIVE_1_B 0x711ac +#define _PLANE_SURFLIVE_2_B 0x712ac +#define _PLANE_SURFLIVE_1(pipe) _PIPE(pipe, _PLANE_SURFLIVE_1_A, _PLANE_SURFLIVE_1_B) +#define _PLANE_SURFLIVE_2(pipe) _PIPE(pipe, _PLANE_SURFLIVE_2_A, _PLANE_SURFLIVE_2_B) +#define PLANE_SURFLIVE(pipe, plane) \ + _MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe)) + #define _PLANE_BUF_CFG_1_B 0x7127c #define _PLANE_BUF_CFG_2_B 0x7137c /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ @@ -7232,6 +7249,8 @@ enum skl_power_gate { #define DC_STATE_DISABLE 0 #define DC_STATE_EN_DC3CO REG_BIT(30) #define DC_STATE_DC3CO_STATUS REG_BIT(29) +#define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21) +#define HOLD_PHY_PG1_LATCH REG_BIT(20) #define DC_STATE_EN_UPTO_DC5 (1 << 0) #define DC_STATE_EN_DC9 (1 << 3) #define DC_STATE_EN_UPTO_DC6 (2 << 0) @@ -7541,9 +7560,29 @@ enum skl_power_gate { #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12 #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12) +/* g4x+, except vlv/chv! */ #define _PIPE_FRMTMSTMP_A 0x70048 +#define _PIPE_FRMTMSTMP_B 0x71048 #define PIPE_FRMTMSTMP(pipe) \ - _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) + _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B) + +/* g4x+, except vlv/chv! 
*/ +#define _PIPE_FLIPTMSTMP_A 0x7004C +#define _PIPE_FLIPTMSTMP_B 0x7104C +#define PIPE_FLIPTMSTMP(pipe) \ + _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B) + +/* tgl+ */ +#define _PIPE_FLIPDONETMSTMP_A 0x70054 +#define _PIPE_FLIPDONETMSTMP_B 0x71054 +#define PIPE_FLIPDONETIMSTMP(pipe) \ + _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B) + +#define _VLV_PIPE_MSA_MISC_A 0x70048 +#define VLV_PIPE_MSA_MISC(pipe) \ + _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A) +#define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31) +#define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */ #define GGC _MMIO(0x108040) #define GMS_MASK REG_GENMASK(15, 8) diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 2b3fe469b360..091743e32e17 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -789,9 +789,9 @@ static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_RING_D(RING_REG); #undef RING_REG - MMIO_D(PIPEMISC(PIPE_A)); - MMIO_D(PIPEMISC(PIPE_B)); - MMIO_D(PIPEMISC(PIPE_C)); + MMIO_D(PIPE_MISC(PIPE_A)); + MMIO_D(PIPE_MISC(PIPE_B)); + MMIO_D(PIPE_MISC(PIPE_C)); MMIO_D(_MMIO(0x1c1d0)); MMIO_D(GEN6_MBCUNIT_SNPCR); MMIO_D(GEN7_MISCCPCTL); diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 0f1ca0b0db49..10252dc11a22 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -277,21 +277,13 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) int i, err; for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { - struct dma_fence *fence = NULL; - if (!submit->in_sync[i]) continue; - err = drm_syncobj_find_fence(file, submit->in_sync[i], - 0, 0, &fence); + err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file, + submit->in_sync[i], 0); if (err) return err; - - err = drm_sched_job_add_dependency(&submit->task->base, fence); - if (err) { - dma_fence_put(fence); - return err; - } } return 0; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 6608a251106b..bb72fda9106d 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) ret = meson_encoder_hdmi_init(priv); if (ret) - goto exit_afbcd; + goto unbind_all; ret = meson_plane_create(priv); if (ret) - goto exit_afbcd; + goto unbind_all; ret = meson_overlay_create(priv); if (ret) - goto exit_afbcd; + goto unbind_all; ret = meson_crtc_create(priv); if (ret) - goto exit_afbcd; + goto unbind_all; ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm); if (ret) - goto exit_afbcd; + goto unbind_all; drm_mode_config_reset(drm); @@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) uninstall_irq: free_irq(priv->vsync_irq, drm); +unbind_all: + if (has_components) + component_unbind_all(drm->dev, drm); exit_afbcd: if (priv->afbcd.ops) priv->afbcd.ops->exit(priv); diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 534621a13a34..3d046878ce6c 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, dw_plat_data = &meson_dw_hdmi->dw_plat_data; ret = devm_regulator_get_enable_optional(dev, "hdmi"); - if (ret < 0) + if (ret < 0 && ret != -ENODEV) 
return ret; meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev, diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c index 154837688ab0..5df1957c8e41 100644 --- a/drivers/gpu/drm/meson/meson_vpp.c +++ b/drivers/gpu/drm/meson/meson_vpp.c @@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv) priv->io_base + _REG(VPP_DOLBY_CTRL)); writel_relaxed(0x1020080, priv->io_base + _REG(VPP_DUMMY_DATA1)); + writel_relaxed(0x42020, + priv->io_base + _REG(VPP_DUMMY_DATA)); } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index ce6b76c45b6f..2359dca80492 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -964,7 +964,7 @@ static void adreno_get_pwrlevels(struct device *dev, gpu->fast_rate = 0; /* You down with OPP? */ - if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) + if (!of_property_present(dev->of_node, "operating-points-v2")) ret = adreno_get_legacy_pwrlevels(dev); else { ret = devm_pm_opp_of_add_table(dev); diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 051bdbc093cf..f38296ad8743 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) bool (*shrink)(struct drm_gem_object *obj); bool cond; unsigned long freed; + unsigned long remaining; } stages[] = { /* Stages of progressively more aggressive/expensive reclaim: */ { &priv->lru.dontneed, purge, true }, @@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) }; long nr = sc->nr_to_scan; unsigned long freed = 0; + unsigned long remaining = 0; for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) { if (!stages[i].cond) continue; stages[i].freed = - drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink); + drm_gem_lru_scan(stages[i].lru, nr, + &stages[i].remaining, + stages[i].shrink); nr -= stages[i].freed; freed += stages[i].freed; + remaining += stages[i].remaining; } if (freed) { @@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) stages[3].freed); } - return (freed > 0) ? freed : SHRINK_STOP; + return (freed > 0 && remaining > 0) ? 
freed : SHRINK_STOP; } #ifdef CONFIG_DEBUG_FS @@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) NULL, }; unsigned idx, unmapped = 0; + unsigned long remaining = 0; for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { unmapped += drm_gem_lru_scan(lrus[idx], vmap_shrink_limit - unmapped, + &remaining, vmap_shrink); } diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 01bfe0783304..926906ca2304 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1871,6 +1871,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c index 8c362c40227f..26d358b9b85a 100644 --- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c +++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c @@ -418,7 +418,7 @@ static int d53e6ea8966_probe(struct spi_device *spi) if (IS_ERR(db->dsi_dev)) { dev_err(dev, "failed to register dsi device: %ld\n", PTR_ERR(db->dsi_dev)); - ret = PTR_ERR(db->dsi_dev); + return PTR_ERR(db->dsi_dev); } db->dsi_dev->lanes = 2; diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index fe5f12f16a63..58dfb15a8757 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -4,6 +4,7 @@ #include <linux/clk.h> #include <linux/devfreq.h> #include <linux/devfreq_cooling.h> +#include <linux/nvmem-consumer.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> @@ -82,6 +83,31 @@ static struct devfreq_dev_profile panfrost_devfreq_profile = { .get_dev_status = panfrost_devfreq_get_dev_status, }; +static int panfrost_read_speedbin(struct device *dev) +{ + u32 val; + int ret; + + ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val); + if (ret) { + /* + * -ENOENT means that this platform doesn't support speedbins + * as it didn't declare any speed-bin nvmem: in this case, we + * keep going without it; any other error means that we are + * supposed to read the bin value, but we failed doing so. 
+ */ + if (ret != -ENOENT) { + DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret); + return ret; + } + + return 0; + } + DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val); + + return devm_pm_opp_set_supported_hw(dev, &val, 1); +} + int panfrost_devfreq_init(struct panfrost_device *pfdev) { int ret; @@ -101,6 +127,10 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) return 0; } + ret = panfrost_read_speedbin(dev); + if (ret) + return ret; + ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names); if (ret) { /* Continue if the optional regulator is missing */ diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index d9ba68cffb77..b0126b9fbadc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -23,7 +23,7 @@ struct panfrost_job; struct panfrost_perfcnt; #define NUM_JOB_SLOTS 3 -#define MAX_PM_DOMAINS 3 +#define MAX_PM_DOMAINS 5 struct panfrost_features { u16 id; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f49096f53141..bbada731bbbd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -647,6 +647,14 @@ static const struct panfrost_compatible amlogic_data = { .vendor_quirk = panfrost_gpu_amlogic_quirk, }; +/* + * The old data with two power supplies for MT8183 is here only to + * keep retro-compatibility with older devicetrees, as DVFS will + * not work with this one. + * + * On new devicetrees please use the _b variant with a single and + * coupled regulators instead. + */ static const char * const mediatek_mt8183_supplies[] = { "mali", "sram", NULL }; static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" }; static const struct panfrost_compatible mediatek_mt8183_data = { @@ -656,6 +664,32 @@ static const struct panfrost_compatible mediatek_mt8183_data = { .pm_domain_names = mediatek_mt8183_pm_domains, }; +static const char * const mediatek_mt8183_b_supplies[] = { "mali", NULL }; +static const struct panfrost_compatible mediatek_mt8183_b_data = { + .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1, + .supply_names = mediatek_mt8183_b_supplies, + .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), + .pm_domain_names = mediatek_mt8183_pm_domains, +}; + +static const char * const mediatek_mt8186_pm_domains[] = { "core0", "core1" }; +static const struct panfrost_compatible mediatek_mt8186_data = { + .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1, + .supply_names = mediatek_mt8183_b_supplies, + .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains), + .pm_domain_names = mediatek_mt8186_pm_domains, +}; + +static const char * const mediatek_mt8192_supplies[] = { "mali", NULL }; +static const char * const mediatek_mt8192_pm_domains[] = { "core0", "core1", "core2", + "core3", "core4" }; +static const struct panfrost_compatible mediatek_mt8192_data = { + .num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1, + .supply_names = mediatek_mt8192_supplies, + .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains), + .pm_domain_names = mediatek_mt8192_pm_domains, +}; + static const struct of_device_id dt_match[] = { /* Set first to probe before the generic compatibles */ { .compatible = "amlogic,meson-gxm-mali", @@ -674,6 +708,9 @@ static const struct of_device_id dt_match[] = { { .compatible = "arm,mali-bifrost", .data = &default_data, }, { .compatible = "arm,mali-valhall-jm", .data = &default_data, }, { 
.compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data }, + { .compatible = "mediatek,mt8183b-mali", .data = &mediatek_mt8183_b_data }, + { .compatible = "mediatek,mt8186-mali", .data = &mediatek_mt8186_data }, + { .compatible = "mediatek,mt8192-mali", .data = &mediatek_mt8192_data }, {} }; MODULE_DEVICE_TABLE(of, dt_match); diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 6452e4e900dd..d28b99732dde 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -204,6 +204,14 @@ static const struct panfrost_model gpu_models[] = { GPU_MODEL(g57, 0x9001, GPU_REV(g57, 0, 0)), + + /* MediaTek MT8192 has a Mali-G57 with a different GPU ID from the + * standard. Arm's driver does not appear to handle this model. + * ChromeOS has a hack downstream for it. Treat it as equivalent to + * standard Mali-G57 for now. + */ + GPU_MODEL(g57, 0x9003, + GPU_REV(g57, 0, 0)), }; static void panfrost_gpu_init_features(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 4e83a1891f3e..666a5e53fe19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, if (pm_runtime_active(pfdev->dev)) mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); - pm_runtime_put_sync_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->dev); } static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 008e172ed43b..d6d29be6b4f4 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -298,13 +298,26 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) escr = params.escr; } - if (rcdu->info->gen < 4) { + /* + * The ESCR register only exists in DU channels that can output to an + * LVDS or DPAT, and the OTAR register in DU channels that can output + * to a DPAD. + */ + if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs | + rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs | + rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0].possible_crtcs | + rcdu->info->routes[RCAR_DU_OUTPUT_LVDS1].possible_crtcs) & + BIT(rcrtc->index)) { dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr); rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr); - rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0); } + if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs | + rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs) & + BIT(rcrtc->index)) + rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0); + /* Signal polarities */ dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) @@ -749,16 +762,17 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, /* * On D3/E3 the dot clock is provided by the LVDS encoder attached to - * the DU channel. We need to enable its clock output explicitly if - * the LVDS output is disabled. + * the DU channel. We need to enable its clock output explicitly before + * starting the CRTC, as the bridge hasn't been enabled by the atomic + * helpers yet. 
*/ - if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) && - rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) { + if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0); struct drm_bridge *bridge = rcdu->lvds[rcrtc->index]; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - rcar_lvds_pclk_enable(bridge, mode->clock * 1000); + rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only); } /* @@ -795,15 +809,16 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, rcar_du_crtc_stop(rcrtc); rcar_du_crtc_put(rcrtc); - if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) && - rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) { + if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0); struct drm_bridge *bridge = rcdu->lvds[rcrtc->index]; /* * Disable the LVDS clock output, see - * rcar_du_crtc_atomic_enable(). + * rcar_du_crtc_atomic_enable(). When the LVDS output is used, + * this also disables the LVDS encoder. */ - rcar_lvds_pclk_disable(bridge); + rcar_lvds_pclk_disable(bridge, dot_clk_only); } if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) && @@ -815,7 +830,6 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, * Disable the DSI clock output, see * rcar_du_crtc_atomic_enable(). */ - rcar_mipi_dsi_pclk_disable(bridge); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index b1787be31e92..7ecec7b04a8d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base, &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); - if (!renc) - return -ENOMEM; + if (IS_ERR(renc)) + return PTR_ERR(renc); renc->output = output; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 152602236377..2ccd2581f544 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -138,6 +138,7 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) { struct rcar_du_device *rcdu = rgrp->dev; u32 defr7 = DEFR7_CODE; + u32 dorcr; /* Enable extended features */ rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE); @@ -174,8 +175,15 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp) /* * Use DS1PR and DS2PR to configure planes priorities and connects the * superposition 0 to DU0 pins. DU1 pins will be configured dynamically. + * + * Groups that have a single channel have a hardcoded configuration. On + * Gen3 and newer, the documentation requires PG1T, DK1S and PG1D_DS1 to + * always be set in this case. */ - rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS); + dorcr = DORCR_PG0D_DS0 | DORCR_DPRS; + if (rcdu->info->gen >= 3 && rgrp->num_crtcs == 1) + dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1; + rcar_du_group_write(rgrp, DORCR, dorcr); /* Apply planes to CRTCs association. */ mutex_lock(&rgrp->lock); @@ -349,7 +357,7 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp) struct rcar_du_device *rcdu = rgrp->dev; u32 dorcr = rcar_du_group_read(rgrp, DORCR); - dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK); + dorcr &= ~(DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_MASK); /* * Set the DPAD1 pins sources. 
Select CRTC 0 if explicitly requested and @@ -357,9 +365,9 @@ int rcar_du_group_set_routing(struct rcar_du_group *rgrp) * by default. */ if (rcdu->dpad1_source == rgrp->index * 2) - dorcr |= DORCR_PG2D_DS1; + dorcr |= DORCR_PG1D_DS0; else - dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2; + dorcr |= DORCR_PG1T | DORCR_DK1S | DORCR_PG1D_DS1; rcar_du_group_write(rgrp, DORCR, dorcr); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index 789ae9285108..6c750fab6ebb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -511,19 +511,19 @@ */ #define DORCR 0x11000 -#define DORCR_PG2T (1 << 30) -#define DORCR_DK2S (1 << 28) -#define DORCR_PG2D_DS1 (0 << 24) -#define DORCR_PG2D_DS2 (1 << 24) -#define DORCR_PG2D_FIX0 (2 << 24) -#define DORCR_PG2D_DOOR (3 << 24) -#define DORCR_PG2D_MASK (3 << 24) -#define DORCR_DR1D (1 << 21) -#define DORCR_PG1D_DS1 (0 << 16) -#define DORCR_PG1D_DS2 (1 << 16) -#define DORCR_PG1D_FIX0 (2 << 16) -#define DORCR_PG1D_DOOR (3 << 16) -#define DORCR_PG1D_MASK (3 << 16) +#define DORCR_PG1T (1 << 30) +#define DORCR_DK1S (1 << 28) +#define DORCR_PG1D_DS0 (0 << 24) +#define DORCR_PG1D_DS1 (1 << 24) +#define DORCR_PG1D_FIX0 (2 << 24) +#define DORCR_PG1D_DOOR (3 << 24) +#define DORCR_PG1D_MASK (3 << 24) +#define DORCR_DR0D (1 << 21) +#define DORCR_PG0D_DS0 (0 << 16) +#define DORCR_PG0D_DS1 (1 << 16) +#define DORCR_PG0D_FIX0 (2 << 16) +#define DORCR_PG0D_DOOR (3 << 16) +#define DORCR_PG0D_MASK (3 << 16) #define DORCR_RGPV (1 << 4) #define DORCR_DPRS (1 << 0) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index fe90be51d64e..45c05d0ffc70 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -73,7 +73,7 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) .src.y2 = mode->vdisplay << 16, .zpos = 0, }, - .format = rcar_du_format_info(DRM_FORMAT_ARGB8888), + .format = rcar_du_format_info(DRM_FORMAT_XRGB8888), .source = RCAR_DU_PLANE_VSPD1, .colorkey = 0, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 260ea5d8624e..ca215b588fd7 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -269,8 +269,8 @@ done: pll->pll_m, pll->pll_n, pll->pll_e, pll->div); } -static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, - unsigned int freq, bool dot_clock_only) +static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, + unsigned int freq, bool dot_clock_only) { struct pll_info pll = { .diff = (unsigned long)-1 }; u32 lvdpllcr; @@ -305,52 +305,8 @@ static void __rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, rcar_lvds_write(lvds, LVDDIV, 0); } -static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq) -{ - __rcar_lvds_pll_setup_d3_e3(lvds, freq, false); -} - /* ----------------------------------------------------------------------------- - * Clock - D3/E3 only - */ - -int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq) -{ - struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - int ret; - - if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))) - return -ENODEV; - - dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq); - - ret = pm_runtime_resume_and_get(lvds->dev); - if (ret) - return ret; - - __rcar_lvds_pll_setup_d3_e3(lvds, freq, true); - - return 0; -} -EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable); - -void rcar_lvds_pclk_disable(struct drm_bridge *bridge) -{ - 
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); - - if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))) - return; - - dev_dbg(lvds->dev, "disabling LVDS PLL\n"); - - rcar_lvds_write(lvds, LVDPLLCR, 0); - - pm_runtime_put_sync(lvds->dev); -} -EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable); - -/* ----------------------------------------------------------------------------- - * Bridge + * Enable/disable */ static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds, @@ -394,10 +350,10 @@ static enum rcar_lvds_mode rcar_lvds_get_lvds_mode(struct rcar_lvds *lvds, return mode; } -static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, - struct drm_atomic_state *state, - struct drm_crtc *crtc, - struct drm_connector *connector) +static void rcar_lvds_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_connector *connector) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); u32 lvdhcr; @@ -410,8 +366,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, /* Enable the companion LVDS encoder in dual-link mode. */ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion) - __rcar_lvds_atomic_enable(lvds->companion, state, crtc, - connector); + rcar_lvds_enable(lvds->companion, state, crtc, connector); /* * Hardcode the channels and control signals routing for now. @@ -465,8 +420,12 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, /* * PLL clock configuration on all instances but the companion in * dual-link mode. + * + * The extended PLL has been turned on by an explicit call to + * rcar_lvds_pclk_enable() from the DU driver. */ - if (lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) { + if ((lvds->link_type == RCAR_LVDS_SINGLE_LINK || lvds->companion) && + !(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) { const struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); const struct drm_display_mode *mode = @@ -531,22 +490,7 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge, rcar_lvds_write(lvds, LVDCR0, lvdcr0); } -static void rcar_lvds_atomic_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct drm_atomic_state *state = old_bridge_state->base.state; - struct drm_connector *connector; - struct drm_crtc *crtc; - - connector = drm_atomic_get_new_connector_for_encoder(state, - bridge->encoder); - crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; - - __rcar_lvds_atomic_enable(bridge, state, crtc, connector); -} - -static void rcar_lvds_atomic_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +static void rcar_lvds_disable(struct drm_bridge *bridge) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); u32 lvdcr0; @@ -578,15 +522,99 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge, rcar_lvds_write(lvds, LVDCR0, 0); rcar_lvds_write(lvds, LVDCR1, 0); - rcar_lvds_write(lvds, LVDPLLCR, 0); + + /* The extended PLL is turned off in rcar_lvds_pclk_disable(). */ + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) + rcar_lvds_write(lvds, LVDPLLCR, 0); /* Disable the companion LVDS encoder in dual-link mode. 
*/ if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion) - lvds->companion->funcs->atomic_disable(lvds->companion, - old_bridge_state); + rcar_lvds_disable(lvds->companion); + + pm_runtime_put_sync(lvds->dev); +} + +/* ----------------------------------------------------------------------------- + * Clock - D3/E3 only + */ + +int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq, + bool dot_clk_only) +{ + struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + int ret; + + if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))) + return -ENODEV; + + dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq); + + ret = pm_runtime_resume_and_get(lvds->dev); + if (ret) + return ret; + + rcar_lvds_pll_setup_d3_e3(lvds, freq, dot_clk_only); + + return 0; +} +EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable); + +void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only) +{ + struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + + if (WARN_ON(!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL))) + return; + + dev_dbg(lvds->dev, "disabling LVDS PLL\n"); + + if (!dot_clk_only) + rcar_lvds_disable(bridge); + + rcar_lvds_write(lvds, LVDPLLCR, 0); pm_runtime_put_sync(lvds->dev); } +EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable); + +/* ----------------------------------------------------------------------------- + * Bridge + */ + +static void rcar_lvds_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct drm_connector *connector; + struct drm_crtc *crtc; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + + rcar_lvds_enable(bridge, state, crtc, connector); +} + +static void rcar_lvds_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + + /* + * For D3 and E3, disabling the LVDS encoder before the DU would stall + * the DU, causing a vblank wait timeout when stopping the DU. This has + * been traced to clearing the LVEN bit, but the exact reason is + * unknown. Keep the encoder enabled, it will be disabled by an explicit + * call to rcar_lvds_pclk_disable() from the DU driver. + * + * We could clear the LVRES bit already to disable the LVDS output, but + * that's likely pointless. 
+ */ + if (lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL) + return; + + rcar_lvds_disable(bridge); +} static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, @@ -922,14 +950,12 @@ static const struct rcar_lvds_device_info rcar_lvds_r8a77990_info = { .gen = 3, .quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK, - .pll_setup = rcar_lvds_pll_setup_d3_e3, }; static const struct rcar_lvds_device_info rcar_lvds_r8a77995_info = { .gen = 3, .quirks = RCAR_LVDS_QUIRK_GEN3_LVEN | RCAR_LVDS_QUIRK_PWD | RCAR_LVDS_QUIRK_EXT_PLL | RCAR_LVDS_QUIRK_DUAL_LINK, - .pll_setup = rcar_lvds_pll_setup_d3_e3, }; static const struct of_device_id rcar_lvds_of_table[] = { diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h index bee7033b60d6..887c63500000 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h @@ -13,17 +13,21 @@ struct drm_bridge; #if IS_ENABLED(CONFIG_DRM_RCAR_LVDS) -int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq); -void rcar_lvds_pclk_disable(struct drm_bridge *bridge); +int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq, + bool dot_clk_only); +void rcar_lvds_pclk_disable(struct drm_bridge *bridge, bool dot_clk_only); bool rcar_lvds_dual_link(struct drm_bridge *bridge); bool rcar_lvds_is_connected(struct drm_bridge *bridge); #else static inline int rcar_lvds_pclk_enable(struct drm_bridge *bridge, - unsigned long freq) + unsigned long freq, bool dot_clk_only) { return -ENOSYS; } -static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { } +static inline void rcar_lvds_pclk_disable(struct drm_bridge *bridge, + bool dot_clock_only) +{ +} static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge) { return false; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 293dcf0f0593..112699949db9 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -667,6 +667,7 @@ static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master, struct rockchip_hdmi *hdmi = dev_get_drvdata(dev); dw_hdmi_unbind(hdmi->hdmi); + drm_encoder_cleanup(&hdmi->encoder.encoder); clk_disable_unprepare(hdmi->ref_clk); regulator_disable(hdmi->avdd_1v8); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 03ca32cd2050..38554ca2fc39 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -2301,7 +2301,7 @@ static int vop2_create_crtcs(struct vop2 *vop2) nvp = 0; for (i = 0; i < vop2->registered_num_wins; i++) { struct vop2_win *win = &vop2->win[i]; - u32 possible_crtcs; + u32 possible_crtcs = 0; if (vop2->data->soc_id == 3566) { /* @@ -2327,12 +2327,11 @@ static int vop2_create_crtcs(struct vop2 *vop2) /* change the unused primary window to overlay window */ win->type = DRM_PLANE_TYPE_OVERLAY; } - } else if (win->type == DRM_PLANE_TYPE_OVERLAY) { - possible_crtcs = (1 << nvps) - 1; - } else { - possible_crtcs = 0; } + if (win->type == DRM_PLANE_TYPE_OVERLAY) + possible_crtcs = (1 << nvps) - 1; + ret = vop2_plane_init(vop2, win, possible_crtcs); if (ret) { drm_err(vop2->drm, "failed to init plane %s: %d\n", @@ -2680,6 +2679,8 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) vop2->len = resource_size(res); vop2->map = devm_regmap_init_mmio(dev, vop2->regs, 
&vop2_regmap_config); + if (IS_ERR(vop2->map)) + return PTR_ERR(vop2->map); ret = vop2_win_init(vop2); if (ret) diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 7fd869520ef2..fe9c6468e440 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -123,6 +123,37 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) dma_fence_put(&fence->scheduled); } +static void drm_sched_fence_set_deadline_finished(struct dma_fence *f, + ktime_t deadline) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + struct dma_fence *parent; + unsigned long flags; + + spin_lock_irqsave(&fence->lock, flags); + + /* If we already have an earlier deadline, keep it: */ + if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) && + ktime_before(fence->deadline, deadline)) { + spin_unlock_irqrestore(&fence->lock, flags); + return; + } + + fence->deadline = deadline; + set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags); + + spin_unlock_irqrestore(&fence->lock, flags); + + /* + * smp_load_aquire() to ensure that if we are racing another + * thread calling drm_sched_fence_set_parent(), that we see + * the parent set before it calls test_bit(HAS_DEADLINE_BIT) + */ + parent = smp_load_acquire(&fence->parent); + if (parent) + dma_fence_set_deadline(parent, deadline); +} + static const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, @@ -133,6 +164,7 @@ static const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_finished, + .set_deadline = drm_sched_fence_set_deadline_finished, }; struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) @@ -147,6 +179,20 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) } EXPORT_SYMBOL(to_drm_sched_fence); +void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence, + struct dma_fence *fence) +{ + /* + * smp_store_release() to ensure another thread racing us + * in drm_sched_fence_set_deadline_finished() sees the + * fence's parent set before test_bit() + */ + smp_store_release(&s_fence->parent, dma_fence_get(fence)); + if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, + &s_fence->finished.flags)) + dma_fence_set_deadline(fence, s_fence->deadline); +} + struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity, void *owner) { diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 214364fccb71..f6801eb21820 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -722,7 +722,7 @@ EXPORT_SYMBOL(drm_sched_job_add_dependency); /** * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency * @job: scheduler job to add the dependencies to - * @file_private: drm file private pointer + * @file: drm file private pointer * @handle: syncobj handle to lookup * @point: timeline point * @@ -1048,7 +1048,7 @@ static int drm_sched_main(void *param) drm_sched_fence_scheduled(s_fence); if (!IS_ERR_OR_NULL(fence)) { - s_fence->parent = dma_fence_get(fence); + drm_sched_fence_set_parent(s_fence, fence); /* Drop for original kref_init of the fence */ dma_fence_put(fence); diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c 
index 38070fc261f3..b11dbd50d73e 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -792,7 +792,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, dev_set_drvdata(dev, backend); spin_lock_init(&backend->frontend_lock); - if (of_find_property(dev->of_node, "interconnects", NULL)) { + if (of_property_present(dev->of_node, "interconnects")) { /* * This assume we have the same DMA constraints for all our the * devices in our pipeline (all the backends, but also the diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index dd283a3a4e36..e49f78a6a8cf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev) /* drm_vblank_init calls kcalloc, which can fail */ ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret) - goto cleanup_mode_config; + goto unbind_all; /* Remove early framebuffers (ie. simplefb) */ ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver); if (ret) - goto cleanup_mode_config; + goto unbind_all; sun4i_framebuffer_init(drm); @@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev) finish_poll: drm_kms_helper_poll_fini(drm); +unbind_all: + component_unbind_all(dev, NULL); cleanup_mode_config: drm_mode_config_cleanup(drm); of_reserved_mem_device_release(dev); diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index bafee05f6b24..11d5244a5aa5 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -391,7 +391,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master, mixer->engine.ops = &sun8i_engine_ops; mixer->engine.node = dev->of_node; - if (of_find_property(dev->of_node, "iommus", NULL)) { + if (of_property_present(dev->of_node, "iommus")) { /* * This assume we have the same DMA constraints for * all our the mixers in our pipeline. 
This sounds diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 84b5cc29c8fc..474bb7a1c4ee 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -67,6 +67,11 @@ struct convert_to_argb2101010_result { const u32 expected[TEST_BUF_SIZE]; }; +struct convert_to_mono_result { + unsigned int dst_pitch; + const u8 expected[TEST_BUF_SIZE]; +}; + struct convert_xrgb8888_case { const char *name; unsigned int pitch; @@ -82,6 +87,7 @@ struct convert_xrgb8888_case { struct convert_to_argb8888_result argb8888_result; struct convert_to_xrgb2101010_result xrgb2101010_result; struct convert_to_argb2101010_result argb2101010_result; + struct convert_to_mono_result mono_result; }; static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { @@ -131,6 +137,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = 0, .expected = { 0xFFF00000 }, }, + .mono_result = { + .dst_pitch = 0, + .expected = { 0b0 }, + }, }, { .name = "single_pixel_clip_rectangle", @@ -181,6 +191,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .dst_pitch = 0, .expected = { 0xFFF00000 }, }, + .mono_result = { + .dst_pitch = 0, + .expected = { 0b0 }, + }, }, { /* Well known colors: White, black, red, green, blue, magenta, @@ -293,6 +307,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0xFFFFFC00, 0xC00FFFFF, }, }, + .mono_result = { + .dst_pitch = 0, + .expected = { + 0b01, + 0b10, + 0b00, + 0b11, + }, + }, }, { /* Randomly picked colors. Full buffer within the clip area. */ @@ -300,96 +323,104 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .pitch = 3 * 4, .clip = DRM_RECT_INIT(0, 0, 3, 3), .xrgb8888 = { - 0xA10E449C, 0xB1114D05, 0xC1A80303, - 0xD16C7073, 0xA20E449C, 0xB2114D05, - 0xC2A80303, 0xD26C7073, 0xA30E449C, + 0xA10E449C, 0xB1114D05, 0xC1A8F303, + 0xD16CF073, 0xA20E449C, 0xB2114D05, + 0xC2A80303, 0xD26CF073, 0xA30E449C, }, .gray8_result = { .dst_pitch = 5, .expected = { - 0x3C, 0x33, 0x34, 0x00, 0x00, - 0x6F, 0x3C, 0x33, 0x00, 0x00, - 0x34, 0x6F, 0x3C, 0x00, 0x00, + 0x3C, 0x33, 0xC4, 0x00, 0x00, + 0xBB, 0x3C, 0x33, 0x00, 0x00, + 0x34, 0xBB, 0x3C, 0x00, 0x00, }, }, .rgb332_result = { .dst_pitch = 5, .expected = { - 0x0A, 0x08, 0xA0, 0x00, 0x00, - 0x6D, 0x0A, 0x08, 0x00, 0x00, - 0xA0, 0x6D, 0x0A, 0x00, 0x00, + 0x0A, 0x08, 0xBC, 0x00, 0x00, + 0x7D, 0x0A, 0x08, 0x00, 0x00, + 0xA0, 0x7D, 0x0A, 0x00, 0x00, }, }, .rgb565_result = { .dst_pitch = 10, .expected = { - 0x0A33, 0x1260, 0xA800, 0x0000, 0x0000, - 0x6B8E, 0x0A33, 0x1260, 0x0000, 0x0000, - 0xA800, 0x6B8E, 0x0A33, 0x0000, 0x0000, + 0x0A33, 0x1260, 0xAF80, 0x0000, 0x0000, + 0x6F8E, 0x0A33, 0x1260, 0x0000, 0x0000, + 0xA800, 0x6F8E, 0x0A33, 0x0000, 0x0000, }, .expected_swab = { - 0x330A, 0x6012, 0x00A8, 0x0000, 0x0000, - 0x8E6B, 0x330A, 0x6012, 0x0000, 0x0000, - 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000, + 0x330A, 0x6012, 0x80AF, 0x0000, 0x0000, + 0x8E6F, 0x330A, 0x6012, 0x0000, 0x0000, + 0x00A8, 0x8E6F, 0x330A, 0x0000, 0x0000, }, }, .xrgb1555_result = { .dst_pitch = 10, .expected = { - 0x0513, 0x0920, 0x5400, 0x0000, 0x0000, - 0x35CE, 0x0513, 0x0920, 0x0000, 0x0000, - 0x5400, 0x35CE, 0x0513, 0x0000, 0x0000, + 0x0513, 0x0920, 0x57C0, 0x0000, 0x0000, + 0x37CE, 0x0513, 0x0920, 0x0000, 0x0000, + 0x5400, 0x37CE, 0x0513, 0x0000, 0x0000, }, }, .argb1555_result = { .dst_pitch = 10, .expected = { - 0x8513, 0x8920, 0xD400, 0x0000, 0x0000, - 0xB5CE, 0x8513, 0x8920, 0x0000, 0x0000, - 
0xD400, 0xB5CE, 0x8513, 0x0000, 0x0000, + 0x8513, 0x8920, 0xD7C0, 0x0000, 0x0000, + 0xB7CE, 0x8513, 0x8920, 0x0000, 0x0000, + 0xD400, 0xB7CE, 0x8513, 0x0000, 0x0000, }, }, .rgba5551_result = { .dst_pitch = 10, .expected = { - 0x0A27, 0x1241, 0xA801, 0x0000, 0x0000, - 0x6B9D, 0x0A27, 0x1241, 0x0000, 0x0000, - 0xA801, 0x6B9D, 0x0A27, 0x0000, 0x0000, + 0x0A27, 0x1241, 0xAF81, 0x0000, 0x0000, + 0x6F9D, 0x0A27, 0x1241, 0x0000, 0x0000, + 0xA801, 0x6F9D, 0x0A27, 0x0000, 0x0000, }, }, .rgb888_result = { .dst_pitch = 15, .expected = { - 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8, + 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0xF3, 0xA8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, + 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, + 0x03, 0x03, 0xA8, 0x73, 0xF0, 0x6C, 0x9C, 0x44, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, }, .argb8888_result = { .dst_pitch = 20, .expected = { - 0xFF0E449C, 0xFF114D05, 0xFFA80303, 0x00000000, 0x00000000, - 0xFF6C7073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000, - 0xFFA80303, 0xFF6C7073, 0xFF0E449C, 0x00000000, 0x00000000, + 0xFF0E449C, 0xFF114D05, 0xFFA8F303, 0x00000000, 0x00000000, + 0xFF6CF073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000, + 0xFFA80303, 0xFF6CF073, 0xFF0E449C, 0x00000000, 0x00000000, }, }, .xrgb2101010_result = { .dst_pitch = 20, .expected = { - 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000, - 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000, - 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000, + 0x03844672, 0x0444D414, 0x2A2F3C0C, 0x00000000, 0x00000000, + 0x1B1F0DCD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000, + 0x2A20300C, 0x1B1F0DCD, 0x03844672, 0x00000000, 0x00000000, }, }, .argb2101010_result = { .dst_pitch = 20, .expected = { - 0xC3844672, 0xC444D414, 0xEA20300C, 0x00000000, 0x00000000, - 0xDB1705CD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000, - 0xEA20300C, 0xDB1705CD, 0xC3844672, 0x00000000, 0x00000000, + 0xC3844672, 0xC444D414, 0xEA2F3C0C, 0x00000000, 0x00000000, + 0xDB1F0DCD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000, + 0xEA20300C, 0xDB1F0DCD, 0xC3844672, 0x00000000, 0x00000000, + }, + }, + .mono_result = { + .dst_pitch = 2, + .expected = { + 0b100, 0b000, + 0b001, 0b000, + 0b010, 0b000, }, }, }, @@ -409,15 +440,12 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch, const struct drm_rect *clip) { const struct drm_format_info *dst_fi = drm_format_info(dst_format); - unsigned int bpp; if (!dst_fi) return -EINVAL; - if (!dst_pitch) { - bpp = drm_format_info_bpp(dst_fi, 0); - dst_pitch = DIV_ROUND_UP(drm_rect_width(clip) * bpp, 8); - } + if (!dst_pitch) + dst_pitch = drm_format_info_min_pitch(dst_fi, 0, drm_rect_width(clip)); return dst_pitch * drm_rect_height(clip); } @@ -792,6 +820,36 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test) KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } +static void drm_test_fb_xrgb8888_to_mono(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_mono_result *result = ¶ms->mono_result; + size_t dst_size; + u8 *buf = NULL; + __le32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_C1, result->dst_pitch, ¶ms->clip); + + 
KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + drm_fb_xrgb8888_to_mono(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); +} + static struct kunit_case drm_format_helper_test_cases[] = { KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params), @@ -803,6 +861,7 @@ static struct kunit_case drm_format_helper_test_cases[] = { KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params), KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_mono, convert_xrgb8888_gen_params), {} }; diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 024346054c70..d254679a136e 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -545,7 +545,6 @@ static int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.prefer_shadow_fbdev = 1; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 6e349ca42485..76cd7f515bab 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -162,13 +162,9 @@ static bool display_get_big_endian_of(struct drm_device *dev, struct device_node bool big_endian; #ifdef __BIG_ENDIAN - big_endian = true; - if (of_get_property(of_node, "little-endian", NULL)) - big_endian = false; + big_endian = !of_property_read_bool(of_node, "little-endian"); #else - big_endian = false; - if (of_get_property(of_node, "big-endian", NULL)) - big_endian = true; + big_endian = of_property_read_bool(of_node, "big-endian"); #endif return big_endian; diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index c38d85848af8..25e11ef11c4c 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -204,7 +204,7 @@ simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node) if (err) return ERR_PTR(err); - if (of_get_property(of_node, "reg", NULL)) + if (of_property_present(of_node, "reg")) drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n"); return res; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 459f1b4440da..bd5dae4d1624 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -84,6 +84,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); * ttm_bo_set_bulk_move - update BOs bulk move object * * @bo: The buffer object. + * @bulk: bulk move structure * * Update the BOs bulk move object, making sure that resources are added/removed * as well. 
A bulk move allows to move many resource on the LRU at once, @@ -294,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (unlock_resv) dma_resv_unlock(bo->base.resv); - ttm_bo_put(bo); - return 0; } @@ -748,7 +747,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, * * @bo: Pointer to a struct ttm_buffer_object. the data of which * we want to allocate space for. - * @proposed_placement: Proposed new placement for the buffer object. + * @placement: Proposed new placement for the buffer object. * @mem: A struct ttm_resource. * @ctx: if and how to sleep, lock buffers and alloc memory * diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index cd73631b6106..64a59f46f6c3 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -157,7 +157,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo = res->bo; uint32_t num_pages; - if (!bo) + if (!bo || bo->resource != res) continue; num_pages = PFN_UP(bo->base.size); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index d30e4547b4c5..464c3cc8e6fb 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -3020,7 +3020,7 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) struct device *dev = &pdev->dev; int ret; - if (!of_find_property(dev->of_node, "interrupts", NULL)) { + if (!of_property_present(dev->of_node, "interrupts")) { dev_warn(dev, "'interrupts' DT property is missing, no CEC\n"); return 0; } diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 0ed300317f87..34cf63e6fb3d 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -39,17 +39,6 @@ struct vgem_file { struct mutex fence_mutex; }; -#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) -struct drm_vgem_gem_object { - struct drm_gem_object base; - - struct page **pages; - unsigned int pages_pin_count; - struct mutex pages_lock; - - struct sg_table *table; -}; - int vgem_fence_open(struct vgem_file *file); int vgem_fence_attach_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index e1accfc47edf..b1a00c0c25a7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); if (virtio_gpu_is_shmem(bo) && use_dma_api) - dma_sync_sgtable_for_device(&vgdev->vdev->dev, + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); @@ -1025,7 +1025,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); if (virtio_gpu_is_shmem(bo) && use_dma_api) - dma_sync_sgtable_for_device(&vgdev->vdev->dev, + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 84d6380b9895..5162a7a12792 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2112,7 +2112,6 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.max_width = dev_priv->texture_max_width; 
dev->mode_config.max_height = dev_priv->texture_max_height; dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32; - dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob; drm_mode_create_suggested_offset_properties(dev); vmw_kms_create_hotplug_mode_update_property(dev_priv); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 4872d183d860..aae2efeef503 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -487,7 +487,6 @@ static int host1x_get_resets(struct host1x *host) static int host1x_probe(struct platform_device *pdev) { struct host1x *host; - int syncpt_irq; int err; host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); @@ -517,8 +516,8 @@ static int host1x_probe(struct platform_device *pdev) } host->syncpt_irq = platform_get_irq(pdev, 0); - if (syncpt_irq < 0) - return syncpt_irq; + if (host->syncpt_irq < 0) + return host->syncpt_irq; mutex_init(&host->devices_lock); INIT_LIST_HEAD(&host->devices); diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 51b3d16c3223..6e4c92b500b8 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr, val = (temp - val) / 1000; if (sattr->index != 1) { - data->temp[HYSTERSIS][sattr->index] &= 0xF0; + data->temp[HYSTERSIS][sattr->index] &= 0x0F; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4; } else { - data->temp[HYSTERSIS][sattr->index] &= 0x0F; + data->temp[HYSTERSIS][sattr->index] &= 0xF0; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF); } @@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr, val = data->enh_acoustics[0] & 0xf; break; case 1: - val = (data->enh_acoustics[1] >> 4) & 0xf; + val = data->enh_acoustics[1] & 0xf; break; case 2: default: - val = data->enh_acoustics[1] & 0xf; + val = (data->enh_acoustics[1] >> 4) & 0xf; break; } diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index 33edb5c02f7d..d193ed3cb35e 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -757,6 +757,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, struct hwmon_device *hwdev; const char *label; struct device *hdev; + struct device *tdev = dev; int i, err, id; /* Complain about invalid characters in hwmon name attribute */ @@ -826,7 +827,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, hwdev->name = name; hdev->class = &hwmon_class; hdev->parent = dev; - hdev->of_node = dev ? dev->of_node : NULL; + while (tdev && !tdev->of_node) + tdev = tdev->parent; + hdev->of_node = tdev ? 
tdev->of_node : NULL; hwdev->chip = chip; dev_set_drvdata(hdev, drvdata); dev_set_name(hdev, HWMON_ID_FORMAT, id); @@ -838,7 +841,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, INIT_LIST_HEAD(&hwdev->tzdata); - if (dev && dev->of_node && chip && chip->ops->read && + if (hdev->of_node && chip && chip->ops->read && chip->info[0]->type == hwmon_chip && (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) { err = hwmon_thermal_register_sensors(hdev); diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index e06186986444..f3a4c5633b1e 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev, return ret; } else if (val > INA3221_CHANNEL3) { dev_err(dev, "invalid reg %d of %pOFn\n", val, child); - return ret; + return -EINVAL; } input = &ina->inputs[val]; diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 66f7ceaa7c3f..e9614eb557d4 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -515,6 +515,8 @@ static const struct it87_devices it87_devices[] = { #define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP) #define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V) #define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT) +#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \ + FEAT_10_9MV_ADC)) struct it87_sio_data { int sioaddr; @@ -3134,7 +3136,7 @@ static int it87_probe(struct platform_device *pdev) "Detected broken BIOS defaults, disabling PWM interface\n"); /* Starting with IT8721F, we handle scaling of internal voltages */ - if (has_12mv_adc(data)) { + if (has_scaling(data)) { if (sio_data->internal & BIT(0)) data->in_scaled |= BIT(3); /* in3 is AVCC */ if (sio_data->internal & BIT(1)) diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c index 88514152d930..69341de397cb 100644 --- a/drivers/hwmon/ltc2992.c +++ b/drivers/hwmon/ltc2992.c @@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st) st->gc.label = name; st->gc.parent = &st->client->dev; st->gc.owner = THIS_MODULE; + st->gc.can_sleep = true; st->gc.base = -1; st->gc.names = st->gpio_names; st->gc.ngpio = ARRAY_SIZE(st->gpio_names); diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c index 30850a479f61..87d56f0fc888 100644 --- a/drivers/hwmon/peci/cputemp.c +++ b/drivers/hwmon/peci/cputemp.c @@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = { .thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree, }; +static const struct cpu_info cpu_skx = { + .reg = &resolved_cores_reg_hsx, + .min_peci_revision = 0x33, + .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree, +}; + static const struct cpu_info cpu_icx = { .reg = &resolved_cores_reg_icx, .min_peci_revision = 0x40, @@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = { }, { .name = "peci_cpu.cputemp.skx", - .driver_data = (kernel_ulong_t)&cpu_hsx, + .driver_data = (kernel_ulong_t)&cpu_skx, }, { .name = "peci_cpu.cputemp.icx", diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c index ec5f932fc6f0..1ac2b2f4c570 100644 --- a/drivers/hwmon/pmbus/adm1266.c +++ b/drivers/hwmon/pmbus/adm1266.c @@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data) data->gc.label = name; data->gc.parent = &data->client->dev; data->gc.owner = THIS_MODULE; + data->gc.can_sleep = true; data->gc.base = -1; data->gc.names = data->gpio_names; data->gc.ngpio = 
ARRAY_SIZE(data->gpio_names); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index 75fc770c9e40..3daaf2237832 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -7,6 +7,7 @@ */ #include <linux/debugfs.h> +#include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> @@ -16,6 +17,7 @@ #include <linux/i2c.h> #include <linux/pmbus.h> #include <linux/gpio/driver.h> +#include <linux/timekeeping.h> #include "pmbus.h" enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090, @@ -65,6 +67,7 @@ struct ucd9000_data { struct gpio_chip gpio; #endif struct dentry *debugfs; + ktime_t write_time; }; #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info) @@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry { u8 index; }; +/* + * It has been observed that the UCD90320 randomly fails register access when + * doing another access right on the back of a register write. To mitigate this + * make sure that there is a minimum delay between a write access and the + * following access. The 250us is based on experimental data. At a delay of + * 200us the issue seems to go away. Add a bit of extra margin to allow for + * system to system differences. + */ +#define UCD90320_WAIT_DELAY_US 250 + +static inline void ucd90320_wait(const struct ucd9000_data *data) +{ + s64 delta = ktime_us_delta(ktime_get(), data->write_time); + + if (delta < UCD90320_WAIT_DELAY_US) + udelay(UCD90320_WAIT_DELAY_US - delta); +} + +static int ucd90320_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + + if (reg >= PMBUS_VIRT_BASE) + return -ENXIO; + + ucd90320_wait(data); + return pmbus_read_word_data(client, page, phase, reg); +} + +static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + + ucd90320_wait(data); + return pmbus_read_byte_data(client, page, reg); +} + +static int ucd90320_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + int ret; + + ucd90320_wait(data); + ret = pmbus_write_word_data(client, page, reg, word); + data->write_time = ktime_get(); + + return ret; +} + +static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + int ret; + + ucd90320_wait(data); + ret = pmbus_write_byte(client, page, value); + data->write_time = ktime_get(); + + return ret; +} + static int ucd9000_get_fan_config(struct i2c_client *client, int fan) { int fan_config = 0; @@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client) info->read_byte_data = ucd9000_read_byte_data; info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34; + } else if (mid->driver_data == ucd90320) { + info->read_byte_data = ucd90320_read_byte_data; + info->read_word_data = ucd90320_read_word_data; + info->write_byte = ucd90320_write_byte; + info->write_word_data = ucd90320_write_word_data; } ucd9000_probe_gpio(client, mid, data); diff --git a/drivers/hwmon/tmp513.c 
b/drivers/hwmon/tmp513.c index 47bbe47e062f..7d5f7441aceb 100644 --- a/drivers/hwmon/tmp513.c +++ b/drivers/hwmon/tmp513.c @@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client) static struct i2c_driver tmp51x_driver = { .driver = { .name = "tmp51x", - .of_match_table = of_match_ptr(tmp51x_of_match), + .of_match_table = tmp51x_of_match, }, .probe_new = tmp51x_probe, .id_table = tmp51x_id, diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 5cde837bfd09..78d9f52e2a71 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev) ctx->comm_base_addr = pcc_chan->shmem_base_addr; if (ctx->comm_base_addr) { if (version == XGENE_HWMON_V2) - ctx->pcc_comm_addr = (void __force *)ioremap( - ctx->comm_base_addr, - pcc_chan->shmem_size); + ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev, + ctx->comm_base_addr, + pcc_chan->shmem_size); else - ctx->pcc_comm_addr = memremap( - ctx->comm_base_addr, - pcc_chan->shmem_size, - MEMREMAP_WB); + ctx->pcc_comm_addr = devm_memremap(&pdev->dev, + ctx->comm_base_addr, + pcc_chan->shmem_size, + MEMREMAP_WB); } else { dev_err(&pdev->dev, "Failed to get PCC comm region\n"); rc = -ENODEV; @@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev) { struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev); + cancel_work_sync(&ctx->workq); hwmon_device_unregister(ctx->hwmon_dev); kfifo_free(&ctx->async_msg_fifo); if (acpi_disabled) diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c index 8c6c7075c765..e067671b3ce2 100644 --- a/drivers/i2c/busses/i2c-hisi.c +++ b/drivers/i2c/busses/i2c-hisi.c @@ -316,6 +316,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr) max_write == 0) break; } + + /* + * Disable the TX_EMPTY interrupt after finishing all the messages to + * avoid overwhelming the CPU. + */ + if (ctlr->msg_tx_idx == ctlr->msg_num) + hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY); } static irqreturn_t hisi_i2c_irq(int irq, void *context) @@ -341,7 +348,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context) hisi_i2c_read_rx_fifo(ctlr); out: - if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) { + /* + * Only use TRANS_CPLT to indicate the completion. On error cases we'll + * get two interrupts, INT_ERR first then TRANS_CPLT. 
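 *
 * Editor's note (an assumption, not wording from the patch): completing on
 * ctlr->xfer_err as well meant an errored transfer was finished on INT_ERR
 * and the later TRANS_CPLT could then fire against an already-completed
 * transfer. Keying the completion off TRANS_CPLT alone runs complete()
 * exactly once per transfer, while the error bits latched into
 * ctlr->xfer_err (outside this hunk) remain available to the caller, e.g.:
 *
 *	wait_for_completion_timeout(ctlr->completion, timeout);
 *	if (ctlr->xfer_err)
 *		return -EIO;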
+ */ + if (int_stat & HISI_I2C_INT_TRANS_CPLT) { hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL); hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL); complete(ctlr->completion); diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 188f2a36d2fd..a49b14d52a98 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter, if (num == 1 && msgs[0].len == 0) goto stop; + lpi2c_imx->rx_buf = NULL; + lpi2c_imx->tx_buf = NULL; lpi2c_imx->delivered = 0; lpi2c_imx->msglen = msgs[i].len; init_completion(&lpi2c_imx->complete); @@ -503,10 +505,14 @@ disable: static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id) { struct lpi2c_imx_struct *lpi2c_imx = dev_id; + unsigned int enabled; unsigned int temp; + enabled = readl(lpi2c_imx->base + LPI2C_MIER); + lpi2c_imx_intctrl(lpi2c_imx, 0); temp = readl(lpi2c_imx->base + LPI2C_MSR); + temp &= enabled; if (temp & MSR_RDF) lpi2c_imx_read_rxfifo(lpi2c_imx); diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index d113bed79545..e0f3b3545cfe 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -171,7 +171,7 @@ static void mxs_i2c_dma_irq_callback(void *param) } static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, - struct i2c_msg *msg, uint32_t flags) + struct i2c_msg *msg, u8 *buf, uint32_t flags) { struct dma_async_tx_descriptor *desc; struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); @@ -226,7 +226,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, } /* Queue the DMA data transfer. */ - sg_init_one(&i2c->sg_io[1], msg->buf, msg->len); + sg_init_one(&i2c->sg_io[1], buf, msg->len); dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1, DMA_DEV_TO_MEM, @@ -259,7 +259,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap, /* Queue the DMA data transfer. 
*/ sg_init_table(i2c->sg_io, 2); sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1); - sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len); + sg_set_buf(&i2c->sg_io[1], buf, msg->len); dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE); desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2, DMA_MEM_TO_DEV, @@ -563,6 +563,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); int ret; int flags; + u8 *dma_buf; int use_pio = 0; unsigned long time_left; @@ -588,13 +589,20 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, if (ret && (ret != -ENXIO)) mxs_i2c_reset(i2c); } else { + dma_buf = i2c_get_dma_safe_msg_buf(msg, 1); + if (!dma_buf) + return -ENOMEM; + reinit_completion(&i2c->cmd_complete); - ret = mxs_i2c_dma_setup_xfer(adap, msg, flags); - if (ret) + ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags); + if (ret) { + i2c_put_dma_safe_msg_buf(dma_buf, msg, false); return ret; + } time_left = wait_for_completion_timeout(&i2c->cmd_complete, msecs_to_jiffies(1000)); + i2c_put_dma_safe_msg_buf(dma_buf, msg, true); if (!time_left) goto timeout; diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 63259b3ea5ab..3538d36368a9 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip, u32 msg[3]; int rc; + if (writelen > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + memcpy(ctx->dma_buffer, data, writelen); paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen, DMA_TO_DEVICE); diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 0f392f59b135..7a24c1444ace 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -850,6 +850,10 @@ void icc_node_destroy(int id) mutex_unlock(&icc_lock); + if (!node) + return; + + kfree(node->links); kfree(node); } EXPORT_SYMBOL_GPL(icc_node_destroy); @@ -1029,54 +1033,68 @@ int icc_nodes_remove(struct icc_provider *provider) EXPORT_SYMBOL_GPL(icc_nodes_remove); /** - * icc_provider_add() - add a new interconnect provider - * @provider: the interconnect provider that will be added into topology + * icc_provider_init() - initialize a new interconnect provider + * @provider: the interconnect provider to initialize + * + * Must be called before adding nodes to the provider. 
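 *
 * Editor's sketch (an assumption drawn from the conversions below, not part
 * of this kerneldoc): the intended call order for a provider driver is
 * roughly
 *
 *	icc_provider_init(provider);
 *	node = icc_node_create(id);
 *	icc_node_add(node, provider);
 *	icc_link_create(node, peer_id);
 *	err = icc_provider_register(provider);
 *	if (err)
 *		icc_nodes_remove(provider);
 *
 * with teardown in the reverse order: icc_provider_deregister() first, then
 * icc_nodes_remove().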
+ */ +void icc_provider_init(struct icc_provider *provider) +{ + WARN_ON(!provider->set); + + INIT_LIST_HEAD(&provider->nodes); +} +EXPORT_SYMBOL_GPL(icc_provider_init); + +/** + * icc_provider_register() - register a new interconnect provider + * @provider: the interconnect provider to register * * Return: 0 on success, or an error code otherwise */ -int icc_provider_add(struct icc_provider *provider) +int icc_provider_register(struct icc_provider *provider) { - if (WARN_ON(!provider->set)) - return -EINVAL; if (WARN_ON(!provider->xlate && !provider->xlate_extended)) return -EINVAL; mutex_lock(&icc_lock); - - INIT_LIST_HEAD(&provider->nodes); list_add_tail(&provider->provider_list, &icc_providers); - mutex_unlock(&icc_lock); - dev_dbg(provider->dev, "interconnect provider added to topology\n"); + dev_dbg(provider->dev, "interconnect provider registered\n"); return 0; } -EXPORT_SYMBOL_GPL(icc_provider_add); +EXPORT_SYMBOL_GPL(icc_provider_register); /** - * icc_provider_del() - delete previously added interconnect provider - * @provider: the interconnect provider that will be removed from topology + * icc_provider_deregister() - deregister an interconnect provider + * @provider: the interconnect provider to deregister */ -void icc_provider_del(struct icc_provider *provider) +void icc_provider_deregister(struct icc_provider *provider) { mutex_lock(&icc_lock); - if (provider->users) { - pr_warn("interconnect provider still has %d users\n", - provider->users); - mutex_unlock(&icc_lock); - return; - } - - if (!list_empty(&provider->nodes)) { - pr_warn("interconnect provider still has nodes\n"); - mutex_unlock(&icc_lock); - return; - } + WARN_ON(provider->users); list_del(&provider->provider_list); mutex_unlock(&icc_lock); } +EXPORT_SYMBOL_GPL(icc_provider_deregister); + +int icc_provider_add(struct icc_provider *provider) +{ + icc_provider_init(provider); + + return icc_provider_register(provider); +} +EXPORT_SYMBOL_GPL(icc_provider_add); + +void icc_provider_del(struct icc_provider *provider) +{ + WARN_ON(!list_empty(&provider->nodes)); + + icc_provider_deregister(provider); +} EXPORT_SYMBOL_GPL(icc_provider_del); static const struct of_device_id __maybe_unused ignore_list[] = { diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c index 823d9be9771a..979ed610f704 100644 --- a/drivers/interconnect/imx/imx.c +++ b/drivers/interconnect/imx/imx.c @@ -295,6 +295,9 @@ int imx_icc_register(struct platform_device *pdev, provider->xlate = of_icc_xlate_onecell; provider->data = data; provider->dev = dev->parent; + + icc_provider_init(provider); + platform_set_drvdata(pdev, imx_provider); if (settings) { @@ -306,20 +309,18 @@ int imx_icc_register(struct platform_device *pdev, } } - ret = icc_provider_add(provider); - if (ret) { - dev_err(dev, "error adding interconnect provider: %d\n", ret); + ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings); + if (ret) return ret; - } - ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings); + ret = icc_provider_register(provider); if (ret) - goto provider_del; + goto err_unregister_nodes; return 0; -provider_del: - icc_provider_del(provider); +err_unregister_nodes: + imx_icc_unregister_nodes(&imx_provider->provider); return ret; } EXPORT_SYMBOL_GPL(imx_icc_register); @@ -328,9 +329,8 @@ void imx_icc_unregister(struct platform_device *pdev) { struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev); + icc_provider_deregister(&imx_provider->provider); 
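	/*
	 * Editor's note (reasoning added for clarity, not taken from the
	 * patch): deregistering the provider before tearing down its nodes
	 * means no new consumer lookup can race against
	 * imx_icc_unregister_nodes() below, mirroring the probe path, which
	 * now registers the provider only after all nodes have been created
	 * and linked.
	 */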
imx_icc_unregister_nodes(&imx_provider->provider); - - icc_provider_del(&imx_provider->provider); } EXPORT_SYMBOL_GPL(imx_icc_unregister); diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index df3196f72536..4180a06681b2 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -503,7 +503,6 @@ regmap_done: } provider = &qp->provider; - INIT_LIST_HEAD(&provider->nodes); provider->dev = dev; provider->set = qcom_icc_set; provider->pre_aggregate = qcom_icc_pre_bw_aggregate; @@ -511,12 +510,7 @@ regmap_done: provider->xlate_extended = qcom_icc_xlate_extended; provider->data = data; - ret = icc_provider_add(provider); - if (ret) { - dev_err(dev, "error adding interconnect provider: %d\n", ret); - clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); - return ret; - } + icc_provider_init(provider); for (i = 0; i < num_nodes; i++) { size_t j; @@ -524,7 +518,7 @@ regmap_done: node = icc_node_create(qnodes[i]->id); if (IS_ERR(node)) { ret = PTR_ERR(node); - goto err; + goto err_remove_nodes; } node->name = qnodes[i]->name; @@ -538,17 +532,26 @@ regmap_done: } data->num_nodes = num_nodes; + ret = icc_provider_register(provider); + if (ret) + goto err_remove_nodes; + platform_set_drvdata(pdev, qp); /* Populate child NoC devices if any */ - if (of_get_child_count(dev->of_node) > 0) - return of_platform_populate(dev->of_node, NULL, NULL, dev); + if (of_get_child_count(dev->of_node) > 0) { + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) + goto err_deregister_provider; + } return 0; -err: + +err_deregister_provider: + icc_provider_deregister(provider); +err_remove_nodes: icc_nodes_remove(provider); clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); - icc_provider_del(provider); return ret; } @@ -558,9 +561,9 @@ int qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); - icc_provider_del(&qp->provider); return 0; } diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index fd17291c61eb..fdb5e58e408b 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -192,9 +192,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) provider->pre_aggregate = qcom_icc_pre_aggregate; provider->aggregate = qcom_icc_aggregate; provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); provider->data = data; + icc_provider_init(provider); + qp->dev = dev; qp->bcms = desc->bcms; qp->num_bcms = desc->num_bcms; @@ -203,10 +204,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) if (IS_ERR(qp->voter)) return PTR_ERR(qp->voter); - ret = icc_provider_add(provider); - if (ret) - return ret; - for (i = 0; i < qp->num_bcms; i++) qcom_icc_bcm_init(qp->bcms[i], dev); @@ -218,7 +215,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) node = icc_node_create(qn->id); if (IS_ERR(node)) { ret = PTR_ERR(node); - goto err; + goto err_remove_nodes; } node->name = qn->name; @@ -232,16 +229,27 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev) } data->num_nodes = num_nodes; + + ret = icc_provider_register(provider); + if (ret) + goto err_remove_nodes; + platform_set_drvdata(pdev, qp); /* Populate child NoC devices if any */ - if (of_get_child_count(dev->of_node) > 0) - return of_platform_populate(dev->of_node, NULL, NULL, dev); + if 
(of_get_child_count(dev->of_node) > 0) { + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) + goto err_deregister_provider; + } return 0; -err: + +err_deregister_provider: + icc_provider_deregister(provider); +err_remove_nodes: icc_nodes_remove(provider); - icc_provider_del(provider); + return ret; } EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe); @@ -250,8 +258,8 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); - icc_provider_del(&qp->provider); return 0; } diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c index 5ea192f1141d..1828deaca443 100644 --- a/drivers/interconnect/qcom/msm8974.c +++ b/drivers/interconnect/qcom/msm8974.c @@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev) return ret; provider = &qp->provider; - INIT_LIST_HEAD(&provider->nodes); provider->dev = dev; provider->set = msm8974_icc_set; provider->aggregate = icc_std_aggregate; @@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev) provider->data = data; provider->get_bw = msm8974_get_bw; - ret = icc_provider_add(provider); - if (ret) { - dev_err(dev, "error adding interconnect provider: %d\n", ret); - goto err_disable_clks; - } + icc_provider_init(provider); for (i = 0; i < num_nodes; i++) { size_t j; @@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev) node = icc_node_create(qnodes[i]->id); if (IS_ERR(node)) { ret = PTR_ERR(node); - goto err_del_icc; + goto err_remove_nodes; } node->name = qnodes[i]->name; @@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev) } data->num_nodes = num_nodes; + ret = icc_provider_register(provider); + if (ret) + goto err_remove_nodes; + platform_set_drvdata(pdev, qp); return 0; -err_del_icc: +err_remove_nodes: icc_nodes_remove(provider); - icc_provider_del(provider); - -err_disable_clks: clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); return ret; @@ -747,9 +743,9 @@ static int msm8974_icc_remove(struct platform_device *pdev) { struct msm8974_icc_provider *qp = platform_get_drvdata(pdev); + icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); - icc_provider_del(&qp->provider); return 0; } diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index 5fa171087425..1bafb54f1432 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -158,8 +158,8 @@ static int qcom_osm_l3_remove(struct platform_device *pdev) { struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev); + icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); - icc_provider_del(&qp->provider); return 0; } @@ -236,7 +236,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) qnodes = desc->nodes; num_nodes = desc->num_nodes; - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); + data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL); if (!data) return -ENOMEM; @@ -245,14 +245,9 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) provider->set = qcom_osm_l3_set; provider->aggregate = icc_std_aggregate; provider->xlate = of_icc_xlate_onecell; - INIT_LIST_HEAD(&provider->nodes); provider->data = data; - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect 
provider\n"); - return ret; - } + icc_provider_init(provider); for (i = 0; i < num_nodes; i++) { size_t j; @@ -275,12 +270,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev) } data->num_nodes = num_nodes; + ret = icc_provider_register(provider); + if (ret) + goto err; + platform_set_drvdata(pdev, qp); return 0; err: icc_nodes_remove(provider); - icc_provider_del(provider); return ret; } diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c index 0da612d6398c..a29cdb4fac03 100644 --- a/drivers/interconnect/qcom/qcm2290.c +++ b/drivers/interconnect/qcom/qcm2290.c @@ -147,9 +147,9 @@ static struct qcom_icc_node mas_snoc_bimc_nrt = { .name = "mas_snoc_bimc_nrt", .buswidth = 16, .qos.ap_owned = true, - .qos.qos_port = 2, + .qos.qos_port = 3, .qos.qos_mode = NOC_QOS_MODE_BYPASS, - .mas_rpm_id = 163, + .mas_rpm_id = 164, .slv_rpm_id = -1, .num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links), .links = mas_snoc_bimc_nrt_links, diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c index e3a12e3d6e06..2d7a8e7b85ec 100644 --- a/drivers/interconnect/qcom/sm8450.c +++ b/drivers/interconnect/qcom/sm8450.c @@ -1844,100 +1844,6 @@ static const struct qcom_icc_desc sm8450_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node * const *qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err(&pdev->dev, "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - icc_provider_del(&qp->provider); - - return 0; -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sm8450-aggre1-noc", .data = &sm8450_aggre1_noc}, @@ -1966,8 +1872,8 @@ 
static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sm8450", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c index 54fa027ab961..d823ba988ef6 100644 --- a/drivers/interconnect/qcom/sm8550.c +++ b/drivers/interconnect/qcom/sm8550.c @@ -2165,101 +2165,6 @@ static const struct qcom_icc_desc sm8550_system_noc = { .num_bcms = ARRAY_SIZE(system_noc_bcms), }; -static int qnoc_probe(struct platform_device *pdev) -{ - const struct qcom_icc_desc *desc; - struct icc_onecell_data *data; - struct icc_provider *provider; - struct qcom_icc_node * const *qnodes; - struct qcom_icc_provider *qp; - struct icc_node *node; - size_t num_nodes, i; - int ret; - - desc = device_get_match_data(&pdev->dev); - if (!desc) - return -EINVAL; - - qnodes = desc->nodes; - num_nodes = desc->num_nodes; - - qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); - if (!qp) - return -ENOMEM; - - data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); - if (!data) - return -ENOMEM; - - provider = &qp->provider; - provider->dev = &pdev->dev; - provider->set = qcom_icc_set; - provider->pre_aggregate = qcom_icc_pre_aggregate; - provider->aggregate = qcom_icc_aggregate; - provider->xlate_extended = qcom_icc_xlate_extended; - INIT_LIST_HEAD(&provider->nodes); - provider->data = data; - - qp->dev = &pdev->dev; - qp->bcms = desc->bcms; - qp->num_bcms = desc->num_bcms; - - qp->voter = of_bcm_voter_get(qp->dev, NULL); - if (IS_ERR(qp->voter)) - return PTR_ERR(qp->voter); - - ret = icc_provider_add(provider); - if (ret) { - dev_err_probe(&pdev->dev, ret, - "error adding interconnect provider\n"); - return ret; - } - - for (i = 0; i < qp->num_bcms; i++) - qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); - - for (i = 0; i < num_nodes; i++) { - size_t j; - - if (!qnodes[i]) - continue; - - node = icc_node_create(qnodes[i]->id); - if (IS_ERR(node)) { - ret = PTR_ERR(node); - goto err; - } - - node->name = qnodes[i]->name; - node->data = qnodes[i]; - icc_node_add(node, provider); - - for (j = 0; j < qnodes[i]->num_links; j++) - icc_link_create(node, qnodes[i]->links[j]); - - data->nodes[i] = node; - } - data->num_nodes = num_nodes; - - platform_set_drvdata(pdev, qp); - - return 0; -err: - icc_nodes_remove(provider); - icc_provider_del(provider); - return ret; -} - -static int qnoc_remove(struct platform_device *pdev) -{ - struct qcom_icc_provider *qp = platform_get_drvdata(pdev); - - icc_nodes_remove(&qp->provider); - icc_provider_del(&qp->provider); - - return 0; -} - static const struct of_device_id qnoc_of_match[] = { { .compatible = "qcom,sm8550-aggre1-noc", .data = &sm8550_aggre1_noc}, @@ -2294,8 +2199,8 @@ static const struct of_device_id qnoc_of_match[] = { MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { - .probe = qnoc_probe, - .remove = qnoc_remove, + .probe = qcom_icc_rpmh_probe, + .remove = qcom_icc_rpmh_remove, .driver = { .name = "qnoc-sm8550", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c index 6559d8cf8068..ebf09bbf725b 100644 --- a/drivers/interconnect/samsung/exynos.c +++ b/drivers/interconnect/samsung/exynos.c @@ -96,14 +96,9 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec, 
static int exynos_generic_icc_remove(struct platform_device *pdev) { struct exynos_icc_priv *priv = platform_get_drvdata(pdev); - struct icc_node *parent_node, *node = priv->node; - - parent_node = exynos_icc_get_parent(priv->dev->parent->of_node); - if (parent_node && !IS_ERR(parent_node)) - icc_link_destroy(node, parent_node); + icc_provider_deregister(&priv->provider); icc_nodes_remove(&priv->provider); - icc_provider_del(&priv->provider); return 0; } @@ -132,15 +127,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev) provider->inter_set = true; provider->data = priv; - ret = icc_provider_add(provider); - if (ret < 0) - return ret; + icc_provider_init(provider); icc_node = icc_node_create(pdev->id); - if (IS_ERR(icc_node)) { - ret = PTR_ERR(icc_node); - goto err_prov_del; - } + if (IS_ERR(icc_node)) + return PTR_ERR(icc_node); priv->node = icc_node; icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", @@ -149,6 +140,9 @@ static int exynos_generic_icc_probe(struct platform_device *pdev) &priv->bus_clk_ratio)) priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO; + icc_node->data = priv; + icc_node_add(icc_node, provider); + /* * Register a PM QoS request for the parent (devfreq) device. */ @@ -157,9 +151,6 @@ static int exynos_generic_icc_probe(struct platform_device *pdev) if (ret < 0) goto err_node_del; - icc_node->data = priv; - icc_node_add(icc_node, provider); - icc_parent_node = exynos_icc_get_parent(bus_dev->of_node); if (IS_ERR(icc_parent_node)) { ret = PTR_ERR(icc_parent_node); @@ -171,14 +162,17 @@ static int exynos_generic_icc_probe(struct platform_device *pdev) goto err_pmqos_del; } + ret = icc_provider_register(provider); + if (ret < 0) + goto err_pmqos_del; + return 0; err_pmqos_del: dev_pm_qos_remove_request(&priv->qos_req); err_node_del: icc_nodes_remove(provider); -err_prov_del: - icc_provider_del(provider); + return ret; } diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5f1e2593fad7..b0a22e99bade 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -15,6 +15,10 @@ if MD config BLK_DEV_MD tristate "RAID support" select BLOCK_HOLDER_DEPRECATED if SYSFS + # BLOCK_LEGACY_AUTOLOAD requirement should be removed + # after relevant mdadm enhancements - to make "names=yes" + # the default - are widely available. + select BLOCK_LEGACY_AUTOLOAD help This driver lets you combine several hard disk partitions into one logical block device. This can be used to simply append one diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 40cb1719ae4d..3ba53dc3cc3f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -72,7 +72,9 @@ struct dm_crypt_io { struct crypt_config *cc; struct bio *base_bio; u8 *integrity_metadata; - bool integrity_metadata_from_pool; + bool integrity_metadata_from_pool:1; + bool in_tasklet:1; + struct work_struct work; struct tasklet_struct tasklet; @@ -1730,6 +1732,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, io->ctx.r.req = NULL; io->integrity_metadata = NULL; io->integrity_metadata_from_pool = false; + io->in_tasklet = false; atomic_set(&io->io_pending, 0); } @@ -1776,14 +1779,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io) * our tasklet. In this case we need to delay bio_endio() * execution to after the tasklet is done and dequeued. 
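 *
 * Editor's note (illustrative, not part of the patch): rather than probing
 * the tasklet state with tasklet_trylock(), the reworked code keys this off
 * io->in_tasklet, which kcryptd_queue_crypt() sets just before scheduling
 * the tasklet:
 *
 *	io->in_tasklet = true;
 *	tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
 *	tasklet_schedule(&io->tasklet);
 *
 * so crypt_dec_pending() always defers bio_endio() to the workqueue for
 * tasklet-driven I/O and calls it directly otherwise.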
*/ - if (tasklet_trylock(&io->tasklet)) { - tasklet_unlock(&io->tasklet); - bio_endio(base_bio); + if (io->in_tasklet) { + INIT_WORK(&io->work, kcryptd_io_bio_endio); + queue_work(cc->io_queue, &io->work); return; } - INIT_WORK(&io->work, kcryptd_io_bio_endio); - queue_work(cc->io_queue, &io->work); + bio_endio(base_bio); } /* @@ -1936,6 +1938,7 @@ pop_from_list: io = crypt_io_from_node(rb_first(&write_tree)); rb_erase(&io->rb_node, &write_tree); kcryptd_io_write(io); + cond_resched(); } while (!RB_EMPTY_ROOT(&write_tree)); blk_finish_plug(&plug); } @@ -2230,6 +2233,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) * it is being executed with irqs disabled. */ if (in_hardirq() || irqs_disabled()) { + io->in_tasklet = true; tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); tasklet_schedule(&io->tasklet); return; diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index c21a19ab73f7..db2d997a6c18 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared) atomic_read(&shared->in_flight[WRITE]); } -void dm_stats_init(struct dm_stats *stats) +int dm_stats_init(struct dm_stats *stats) { int cpu; struct dm_stats_last_position *last; @@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats) INIT_LIST_HEAD(&stats->list); stats->precise_timestamps = false; stats->last = alloc_percpu(struct dm_stats_last_position); + if (!stats->last) + return -ENOMEM; + for_each_possible_cpu(cpu) { last = per_cpu_ptr(stats->last, cpu); last->last_sector = (sector_t)ULLONG_MAX; last->last_rw = UINT_MAX; } + + return 0; } void dm_stats_cleanup(struct dm_stats *stats) diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h index 0bc152c8e4f3..c6728c8b4159 100644 --- a/drivers/md/dm-stats.h +++ b/drivers/md/dm-stats.h @@ -21,7 +21,7 @@ struct dm_stats_aux { unsigned long long duration_ns; }; -void dm_stats_init(struct dm_stats *st); +int dm_stats_init(struct dm_stats *st); void dm_stats_cleanup(struct dm_stats *st); struct mapped_device; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 6cd105c1cef3..13d4677baafd 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3369,6 +3369,7 @@ static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv) pt->low_water_blocks = low_water_blocks; pt->adjusted_pf = pt->requested_pf = pf; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; /* * Only need to enable discards if the pool should pass @@ -4249,6 +4250,7 @@ static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; ti->num_flush_bios = 1; + ti->limit_swap_bios = true; ti->flush_supported = true; ti->accounts_remapped_io = true; ti->per_io_data_size = sizeof(struct dm_thin_endio_hook); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index eace45a18d45..2d0f934ba6e6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end) sectors = io->sectors; if (!end) - bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio), - start_time); + bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time); else - bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time); + bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors, + start_time); if (static_branch_unlikely(&stats_enabled) && unlikely(dm_stats_used(&md->stats))) { @@ -2097,7 +2097,9 @@ static struct mapped_device *alloc_dev(int minor) if (!md->pending_io) goto bad; - dm_stats_init(&md->stats); + r = dm_stats_init(&md->stats); 
+ if (r < 0) + goto bad; /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); diff --git a/drivers/md/md.c b/drivers/md/md.c index 927a43db5dfb..39e49e5d7182 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3128,6 +3128,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) err = kstrtouint(buf, 10, (unsigned int *)&slot); if (err < 0) return err; + if (slot < 0) + /* overflow */ + return -ENOSPC; } if (rdev->mddev->pers && slot == -1) { /* Setting 'slot' on an active array requires also @@ -6256,6 +6259,11 @@ static void __md_stop(struct mddev *mddev) mddev->to_remove = &md_redundancy_group; module_put(pers->owner); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + + percpu_ref_exit(&mddev->writes_pending); + percpu_ref_exit(&mddev->active_io); + bioset_exit(&mddev->bio_set); + bioset_exit(&mddev->sync_set); } void md_stop(struct mddev *mddev) @@ -6265,10 +6273,6 @@ void md_stop(struct mddev *mddev) */ __md_stop_writes(mddev); __md_stop(mddev); - percpu_ref_exit(&mddev->writes_pending); - percpu_ref_exit(&mddev->active_io); - bioset_exit(&mddev->bio_set); - bioset_exit(&mddev->sync_set); } EXPORT_SYMBOL_GPL(md_stop); @@ -7839,11 +7843,6 @@ static void md_free_disk(struct gendisk *disk) { struct mddev *mddev = disk->private_data; - percpu_ref_exit(&mddev->writes_pending); - percpu_ref_exit(&mddev->active_io); - bioset_exit(&mddev->bio_set); - bioset_exit(&mddev->sync_set); - mddev_free(mddev); } diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 2b01873ba0db..5c2336f318d9 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code) do { if (code == m5mols_default_ffmt[type].code) return type; - } while (type++ != SIZE_DEFAULT_FFMT); + } while (++type != SIZE_DEFAULT_FFMT); return 0; } diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 592907546ee6..5cd28619ea9f 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -794,16 +794,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc) mc->provider.aggregate = mc->soc->icc_ops->aggregate; mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended; - err = icc_provider_add(&mc->provider); - if (err) - return err; + icc_provider_init(&mc->provider); /* create Memory Controller node */ node = icc_node_create(TEGRA_ICC_MC); - if (IS_ERR(node)) { - err = PTR_ERR(node); - goto del_provider; - } + if (IS_ERR(node)) + return PTR_ERR(node); node->name = "Memory Controller"; icc_node_add(node, &mc->provider); @@ -830,12 +826,14 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc) goto remove_nodes; } + err = icc_provider_register(&mc->provider); + if (err) + goto remove_nodes; + return 0; remove_nodes: icc_nodes_remove(&mc->provider); -del_provider: - icc_provider_del(&mc->provider); return err; } diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c index 85bc936c02f9..00ed2b6a0d1b 100644 --- a/drivers/memory/tegra/tegra124-emc.c +++ b/drivers/memory/tegra/tegra124-emc.c @@ -1351,15 +1351,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) emc->provider.aggregate = soc->icc_ops->aggregate; emc->provider.xlate_extended = emc_of_icc_xlate_extended; - err = icc_provider_add(&emc->provider); - if (err) - goto err_msg; + icc_provider_init(&emc->provider); /* create External Memory Controller node */ node = icc_node_create(TEGRA_ICC_EMC); if 
(IS_ERR(node)) { err = PTR_ERR(node); - goto del_provider; + goto err_msg; } node->name = "External Memory Controller"; @@ -1380,12 +1378,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) node->name = "External Memory (DRAM)"; icc_node_add(node, &emc->provider); + err = icc_provider_register(&emc->provider); + if (err) + goto remove_nodes; + return 0; remove_nodes: icc_nodes_remove(&emc->provider); -del_provider: - icc_provider_del(&emc->provider); err_msg: dev_err(emc->dev, "failed to initialize ICC: %d\n", err); diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index bd4e37b6552d..fd595c851a27 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -1021,15 +1021,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) emc->provider.aggregate = soc->icc_ops->aggregate; emc->provider.xlate_extended = emc_of_icc_xlate_extended; - err = icc_provider_add(&emc->provider); - if (err) - goto err_msg; + icc_provider_init(&emc->provider); /* create External Memory Controller node */ node = icc_node_create(TEGRA_ICC_EMC); if (IS_ERR(node)) { err = PTR_ERR(node); - goto del_provider; + goto err_msg; } node->name = "External Memory Controller"; @@ -1050,12 +1048,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) node->name = "External Memory (DRAM)"; icc_node_add(node, &emc->provider); + err = icc_provider_register(&emc->provider); + if (err) + goto remove_nodes; + return 0; remove_nodes: icc_nodes_remove(&emc->provider); -del_provider: - icc_provider_del(&emc->provider); err_msg: dev_err(emc->dev, "failed to initialize ICC: %d\n", err); diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c index 77706e9bc543..c91e9b7e2e01 100644 --- a/drivers/memory/tegra/tegra30-emc.c +++ b/drivers/memory/tegra/tegra30-emc.c @@ -1533,15 +1533,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) emc->provider.aggregate = soc->icc_ops->aggregate; emc->provider.xlate_extended = emc_of_icc_xlate_extended; - err = icc_provider_add(&emc->provider); - if (err) - goto err_msg; + icc_provider_init(&emc->provider); /* create External Memory Controller node */ node = icc_node_create(TEGRA_ICC_EMC); if (IS_ERR(node)) { err = PTR_ERR(node); - goto del_provider; + goto err_msg; } node->name = "External Memory Controller"; @@ -1562,12 +1560,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc) node->name = "External Memory (DRAM)"; icc_node_add(node, &emc->provider); + err = icc_provider_register(&emc->provider); + if (err) + goto remove_nodes; + return 0; remove_nodes: icc_nodes_remove(&emc->provider); -del_provider: - icc_provider_del(&emc->provider); err_msg: dev_err(emc->dev, "failed to initialize ICC: %d\n", err); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index e0dcd5c114db..7728fe685476 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -23,7 +23,7 @@ #include <linux/component.h> #include <drm/drm_connector.h> #include <drm/i915_component.h> -#include <drm/i915_mei_hdcp_interface.h> +#include <drm/i915_hdcp_interface.h> #include "mei_hdcp.h" @@ -52,13 +52,13 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, session_init_in.header.api_version = HDCP_API_VERSION; session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; - session_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; 
session_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; session_init_in.port.integrated_port_type = data->port_type; - session_init_in.port.physical_port = (u8)data->fw_ddi; - session_init_in.port.attached_transcoder = (u8)data->fw_tc; + session_init_in.port.physical_port = (u8)data->hdcp_ddi; + session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; session_init_in.protocol = data->protocol; byte = mei_cldev_send(cldev, (u8 *)&session_init_in, @@ -75,7 +75,7 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, return byte; } - if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", WIRED_INITIATE_HDCP2_SESSION, session_init_out.header.status); @@ -122,13 +122,13 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, verify_rxcert_in.header.api_version = HDCP_API_VERSION; verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; - verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_rxcert_in.header.buffer_len = WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; verify_rxcert_in.port.integrated_port_type = data->port_type; - verify_rxcert_in.port.physical_port = (u8)data->fw_ddi; - verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc; + verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; + verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; verify_rxcert_in.cert_rx = rx_cert->cert_rx; memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); @@ -148,7 +148,7 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, return byte; } - if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", WIRED_VERIFY_RECEIVER_CERT, verify_rxcert_out.header.status); @@ -194,12 +194,12 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, send_hprime_in.header.api_version = HDCP_API_VERSION; send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; - send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; send_hprime_in.port.integrated_port_type = data->port_type; - send_hprime_in.port.physical_port = (u8)data->fw_ddi; - send_hprime_in.port.attached_transcoder = (u8)data->fw_tc; + send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; + send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, HDCP_2_2_H_PRIME_LEN); @@ -218,7 +218,7 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, return byte; } - if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); return -EIO; @@ -251,13 +251,13 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, pairing_info_in.header.api_version = HDCP_API_VERSION; pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; - pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS; + pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; pairing_info_in.header.buffer_len = WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; pairing_info_in.port.integrated_port_type = data->port_type; - pairing_info_in.port.physical_port = (u8)data->fw_ddi; - pairing_info_in.port.attached_transcoder = (u8)data->fw_tc; + pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; + pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, HDCP_2_2_E_KH_KM_LEN); @@ -276,7 +276,7 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, return byte; } - if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n", WIRED_AKE_SEND_PAIRING_INFO, pairing_info_out.header.status); @@ -311,12 +311,12 @@ mei_hdcp_initiate_locality_check(struct device *dev, lc_init_in.header.api_version = HDCP_API_VERSION; lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; - lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; lc_init_in.port.integrated_port_type = data->port_type; - lc_init_in.port.physical_port = (u8)data->fw_ddi; - lc_init_in.port.attached_transcoder = (u8)data->fw_tc; + lc_init_in.port.physical_port = (u8)data->hdcp_ddi; + lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); if (byte < 0) { @@ -330,7 +330,7 @@ mei_hdcp_initiate_locality_check(struct device *dev, return byte; } - if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X Failed. status: 0x%X\n", WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); return -EIO; @@ -366,13 +366,13 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, verify_lprime_in.header.api_version = HDCP_API_VERSION; verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; - verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_lprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; verify_lprime_in.port.integrated_port_type = data->port_type; - verify_lprime_in.port.physical_port = (u8)data->fw_ddi; - verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc; + verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; + verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, HDCP_2_2_L_PRIME_LEN); @@ -391,7 +391,7 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, return byte; } - if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", WIRED_VALIDATE_LOCALITY, verify_lprime_out.header.status); @@ -425,12 +425,12 @@ static int mei_hdcp_get_session_key(struct device *dev, get_skey_in.header.api_version = HDCP_API_VERSION; get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; - get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS; + get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; get_skey_in.port.integrated_port_type = data->port_type; - get_skey_in.port.physical_port = (u8)data->fw_ddi; - get_skey_in.port.attached_transcoder = (u8)data->fw_tc; + get_skey_in.port.physical_port = (u8)data->hdcp_ddi; + get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); if (byte < 0) { @@ -445,7 +445,7 @@ static int mei_hdcp_get_session_key(struct device *dev, return byte; } - if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", WIRED_GET_SESSION_KEY, get_skey_out.header.status); return -EIO; @@ -489,13 +489,13 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, verify_repeater_in.header.api_version = HDCP_API_VERSION; verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; - verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; verify_repeater_in.header.buffer_len = WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; verify_repeater_in.port.integrated_port_type = data->port_type; - verify_repeater_in.port.physical_port = (u8)data->fw_ddi; - verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc; + verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; + verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, HDCP_2_2_RXINFO_LEN); @@ -520,7 +520,7 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, return byte; } - if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", WIRED_VERIFY_REPEATER, verify_repeater_out.header.status); @@ -568,12 +568,12 @@ static int mei_hdcp_verify_mprime(struct device *dev, verify_mprime_in->header.api_version = HDCP_API_VERSION; verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; - verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS; + verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); verify_mprime_in->port.integrated_port_type = data->port_type; - verify_mprime_in->port.physical_port = (u8)data->fw_ddi; - verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc; + verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; + verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); @@ -597,7 +597,7 @@ static int mei_hdcp_verify_mprime(struct device *dev, return byte; } - if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", WIRED_REPEATER_AUTH_STREAM_REQ, verify_mprime_out.header.status); @@ -630,12 +630,12 @@ static int mei_hdcp_enable_authentication(struct device *dev, enable_auth_in.header.api_version = HDCP_API_VERSION; enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; - enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS; + enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; enable_auth_in.port.integrated_port_type = data->port_type; - enable_auth_in.port.physical_port = (u8)data->fw_ddi; - enable_auth_in.port.attached_transcoder = (u8)data->fw_tc; + enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; + enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; enable_auth_in.stream_type = data->streams[0].stream_type; byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, @@ -652,7 +652,7 @@ static int mei_hdcp_enable_authentication(struct device *dev, return byte; } - if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", WIRED_ENABLE_AUTH, enable_auth_out.header.status); return -EIO; @@ -684,13 +684,13 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) session_close_in.header.api_version = HDCP_API_VERSION; session_close_in.header.command_id = WIRED_CLOSE_SESSION; - session_close_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; session_close_in.header.buffer_len = WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; session_close_in.port.integrated_port_type = data->port_type; - session_close_in.port.physical_port = (u8)data->fw_ddi; - session_close_in.port.attached_transcoder = (u8)data->fw_tc; + session_close_in.port.physical_port = (u8)data->hdcp_ddi; + session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; byte = mei_cldev_send(cldev, (u8 *)&session_close_in, sizeof(session_close_in)); @@ -706,7 +706,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) return byte; } - if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) { + if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { dev_dbg(dev, "Session Close Failed. 
status: 0x%X\n", session_close_out.header.status); return -EIO; @@ -715,7 +715,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) return 0; } -static const struct i915_hdcp_component_ops mei_hdcp_ops = { +static const struct i915_hdcp_ops mei_hdcp_ops = { .owner = THIS_MODULE, .initiate_hdcp2_session = mei_hdcp_initiate_session, .verify_receiver_cert_prepare_km = @@ -735,13 +735,12 @@ static const struct i915_hdcp_component_ops mei_hdcp_ops = { static int mei_component_master_bind(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); - struct i915_hdcp_comp_master *comp_master = - mei_cldev_get_drvdata(cldev); + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); int ret; dev_dbg(dev, "%s\n", __func__); comp_master->ops = &mei_hdcp_ops; - comp_master->mei_dev = dev; + comp_master->hdcp_dev = dev; ret = component_bind_all(dev, comp_master); if (ret < 0) return ret; @@ -752,8 +751,7 @@ static int mei_component_master_bind(struct device *dev) static void mei_component_master_unbind(struct device *dev) { struct mei_cl_device *cldev = to_mei_cl_device(dev); - struct i915_hdcp_comp_master *comp_master = - mei_cldev_get_drvdata(cldev); + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); dev_dbg(dev, "%s\n", __func__); component_unbind_all(dev, comp_master); @@ -801,7 +799,7 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent, static int mei_hdcp_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { - struct i915_hdcp_comp_master *comp_master; + struct i915_hdcp_master *comp_master; struct component_match *master_match; int ret; @@ -846,8 +844,7 @@ enable_err_exit: static void mei_hdcp_remove(struct mei_cl_device *cldev) { - struct i915_hdcp_comp_master *comp_master = - mei_cldev_get_drvdata(cldev); + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); int ret; component_master_del(&cldev->dev, &mei_component_master_ops); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h index ca09c8f83d6b..0683d83ec17a 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.h +++ b/drivers/misc/mei/hdcp/mei_hdcp.h @@ -11,358 +11,4 @@ #include <drm/display/drm_hdcp.h> -/* me_hdcp_status: Enumeration of all HDCP Status Codes */ -enum me_hdcp_status { - ME_HDCP_STATUS_SUCCESS = 0x0000, - - /* WiDi Generic Status Codes */ - ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000, - ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, - ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, - ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003, - ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, - ME_HDCP_STATUS_INVALID_PARAMS = 0x1005, - ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006, - - /* WiDi Status Codes */ - ME_HDCP_INVALID_SESSION_STATE = 0x6000, - ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, - ME_HDCP_SRM_INVALID_LENGTH = 0x6002, - ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, - ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004, - ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005, - ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, - ME_HDCP_RX_REVOKED = 0x6007, - ME_HDCP_H_VERIFICATION_FAILED = 0x6008, - ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, - ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, - ME_HDCP_V_VERIFICATION_FAILED = 0x600B, - ME_HDCP_L_VERIFICATION_FAILED = 0x600C, - ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, - ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E, - ME_HDCP_NONCE_GENERATION_FAILED = 0x600F, - ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, - ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011, - ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 
0x6012, - ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, - ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, - ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, - - /* New status for HDCP 2.1 */ - ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016, - - /* New status code for HDCP 2.2 Rx */ - ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, - ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, - ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019, - ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, - ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, - ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, - ME_HDCP_FAIL_NOT_EXPECTED = 0x6023, - ME_HDCP_FAIL_HDCP_OFF = 0x6024, - ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, - ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, - ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, - ME_HDCP_DMA_READ_ERROR = 0x6028, - ME_HDCP_DMA_WRITE_ERROR = 0x6029, - ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, - ME_HDCP_H264_PARSING_ERROR = 0x6031, - ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, - ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, - ME_HDCP_TX_ACTIVE_ERROR = 0x6034, - ME_HDCP_MODE_CHANGE_ERROR = 0x6035, - ME_HDCP_STREAM_TYPE_ERROR = 0x6036, - ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, - - ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, - ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, - ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, - ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, - ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, - ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, - - /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ - ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, - - ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, -}; - -#define HDCP_API_VERSION 0x00010000 - -#define HDCP_M_LEN 16 -#define HDCP_KH_LEN 16 - -/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ -/* Wired_Tx_AKE */ -#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) -#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) - -#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) -#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) -#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) - -#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) -#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) - -#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) -#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) - -#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) -#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) - -/* Wired_Tx_LC */ -#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) -#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) - -#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) -#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) - -/* Wired_Tx_SKE */ -#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) -#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) - -/* Wired_Tx_SKE */ -#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) -#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) - -/* Wired_Tx_Repeater */ -#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) -#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) - -#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ - 32 + 2 + 2) - -#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) - -/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ -enum hdcp_command_id { - _WIDI_COMMAND_BASE = 0x00030000, - WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, - HDCP_GET_SRM_STATUS, - HDCP_SEND_SRM_FRAGMENT, - - /* The wired HDCP Tx commands */ - _WIRED_COMMAND_BASE = 
0x00031000, - WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, - WIRED_VERIFY_RECEIVER_CERT, - WIRED_AKE_SEND_HPRIME, - WIRED_AKE_SEND_PAIRING_INFO, - WIRED_INIT_LOCALITY_CHECK, - WIRED_VALIDATE_LOCALITY, - WIRED_GET_SESSION_KEY, - WIRED_ENABLE_AUTH, - WIRED_VERIFY_REPEATER, - WIRED_REPEATER_AUTH_STREAM_REQ, - WIRED_CLOSE_SESSION, - - _WIRED_COMMANDS_COUNT, -}; - -union encrypted_buff { - u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; - u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; - struct { - u8 e_kh_km[HDCP_KH_LEN]; - u8 m[HDCP_M_LEN]; - } __packed; -}; - -/* HDCP HECI message header. All header values are little endian. */ -struct hdcp_cmd_header { - u32 api_version; - u32 command_id; - enum me_hdcp_status status; - /* Length of the HECI message (excluding the header) */ - u32 buffer_len; -} __packed; - -/* Empty command request or response. No data follows the header. */ -struct hdcp_cmd_no_data { - struct hdcp_cmd_header header; -} __packed; - -/* Uniquely identifies the hdcp port being addressed for a given command. */ -struct hdcp_port_id { - u8 integrated_port_type; - /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ - u8 physical_port; - /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */ - u8 attached_transcoder; - u8 reserved; -} __packed; - -/* - * Data structures for integrated wired HDCP2 Tx in - * support of the AKE protocol - */ -/* HECI struct for integrated wired HDCP Tx session initiation. */ -struct wired_cmd_initiate_hdcp2_session_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 protocol; /* for HDMI vs DP */ -} __packed; - -struct wired_cmd_initiate_hdcp2_session_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 r_tx[HDCP_2_2_RTX_LEN]; - struct hdcp2_tx_caps tx_caps; -} __packed; - -/* HECI struct for ending an integrated wired HDCP Tx session. */ -struct wired_cmd_close_session_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -struct wired_cmd_close_session_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */ -struct wired_cmd_verify_receiver_cert_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - struct hdcp2_cert_rx cert_rx; - u8 r_rx[HDCP_2_2_RRX_LEN]; - u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; -} __packed; - -struct wired_cmd_verify_receiver_cert_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 km_stored; - u8 reserved[3]; - union encrypted_buff ekm_buff; -} __packed; - -/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ -struct wired_cmd_ake_send_hprime_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 h_prime[HDCP_2_2_H_PRIME_LEN]; -} __packed; - -struct wired_cmd_ake_send_hprime_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -/* - * HECI struct for sending in AKE pairing data generated by the Rx in an - * integrated wired HDCP Tx session. - */ -struct wired_cmd_ake_send_pairing_info_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; -} __packed; - -struct wired_cmd_ake_send_pairing_info_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ -/* - * HECI struct for initiating locality check with an - * integrated wired HDCP Tx session. 
- */ -struct wired_cmd_init_locality_check_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -struct wired_cmd_init_locality_check_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 r_n[HDCP_2_2_RN_LEN]; -} __packed; - -/* - * HECI struct for validating an Rx's LPrime value in an - * integrated wired HDCP Tx session. - */ -struct wired_cmd_validate_locality_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 l_prime[HDCP_2_2_L_PRIME_LEN]; -} __packed; - -struct wired_cmd_validate_locality_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -/* - * Data structures for integrated wired HDCP2 Tx in support of the - * SKE protocol - */ -/* HECI struct for creating session key */ -struct wired_cmd_get_session_key_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -struct wired_cmd_get_session_key_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; - u8 r_iv[HDCP_2_2_RIV_LEN]; -} __packed; - -/* HECI struct for the Tx enable authentication command */ -struct wired_cmd_enable_auth_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 stream_type; -} __packed; - -struct wired_cmd_enable_auth_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; - -/* - * Data structures for integrated wired HDCP2 Tx in support of - * the repeater protocols - */ -/* - * HECI struct for verifying the downstream repeater's HDCP topology in an - * integrated wired HDCP Tx session. - */ -struct wired_cmd_verify_repeater_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 rx_info[HDCP_2_2_RXINFO_LEN]; - u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; - u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; - u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; -} __packed; - -struct wired_cmd_verify_repeater_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 content_type_supported; - u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; -} __packed; - -/* - * HECI struct in support of stream management in an - * integrated wired HDCP Tx session. 
- */ -struct wired_cmd_repeater_auth_stream_req_in { - struct hdcp_cmd_header header; - struct hdcp_port_id port; - u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; - u8 m_prime[HDCP_2_2_MPRIME_LEN]; - __be16 k; - struct hdcp2_streamid_type streams[]; -} __packed; - -struct wired_cmd_repeater_auth_stream_req_out { - struct hdcp_cmd_header header; - struct hdcp_port_id port; -} __packed; #endif /* __MEI_HDCP_H__ */ diff --git a/drivers/mmc/host/dw_mmc-starfive.c b/drivers/mmc/host/dw_mmc-starfive.c index 40f5969b07a6..dab1508bf83c 100644 --- a/drivers/mmc/host/dw_mmc-starfive.c +++ b/drivers/mmc/host/dw_mmc-starfive.c @@ -51,7 +51,7 @@ static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot, struct dw_mci *host = slot->host; struct starfive_priv *priv = host->priv; int rise_point = -1, fall_point = -1; - int err, prev_err; + int err, prev_err = 0; int i; bool found = 0; u32 regval; diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 7ef828942df3..89953093e20c 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) MAX_POWER_ON_TIMEOUT, false, host, val, reg); if (ret) - dev_warn(mmc_dev(host->mmc), "Power on failed\n"); + dev_info(mmc_dev(host->mmc), "Power on failed\n"); } } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 00646aa315c3..236e5219c811 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1775,6 +1775,19 @@ void bond_lower_state_changed(struct slave *slave) slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \ } while (0) +/* The bonding driver uses ether_setup() to convert a master bond device + * to ARPHRD_ETHER, that resets the target netdevice's flags so we always + * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set + */ +static void bond_ether_setup(struct net_device *bond_dev) +{ + unsigned int slave_flag = bond_dev->flags & IFF_SLAVE; + + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER | slave_flag; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) @@ -1866,10 +1879,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); - else { - ether_setup(bond_dev); - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; - } + else + bond_ether_setup(bond_dev); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, bond_dev); @@ -2289,9 +2300,7 @@ err_undo_flags: eth_hw_addr_random(bond_dev); if (bond_dev->type != ARPHRD_ETHER) { dev_close(bond_dev); - ether_setup(bond_dev); - bond_dev->flags |= IFF_MASTER; - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + bond_ether_setup(bond_dev); } } diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 8d916e2ee6c2..8dcc32e4e30e 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -93,20 +93,20 @@ static int cc770_get_of_node_data(struct platform_device *pdev, if (priv->can.clock.freq > 8000000) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,divide-memory-clock", NULL)) + if (of_property_read_bool(np, "bosch,divide-memory-clock")) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,iso-low-speed-mux", NULL)) + if 
(of_property_read_bool(np, "bosch,iso-low-speed-mux")) priv->cpu_interface |= CPUIF_MUX; if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) priv->bus_config |= BUSCFG_CBY; - if (of_get_property(np, "bosch,disconnect-rx0-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx0-input")) priv->bus_config |= BUSCFG_DR0; - if (of_get_property(np, "bosch,disconnect-rx1-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx1-input")) priv->bus_config |= BUSCFG_DR1; - if (of_get_property(np, "bosch,disconnect-tx1-output", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-tx1-output")) priv->bus_config |= BUSCFG_DT1; - if (of_get_property(np, "bosch,polarity-dominant", NULL)) + if (of_property_read_bool(np, "bosch,polarity-dominant")) priv->bus_config |= BUSCFG_POL; prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index e968322dfbf0..70887e0aece3 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev, if (of_property_read_u32(of_port, "reg", ®)) continue; - if (reg < B53_CPU_PORT) + if (reg < B53_N_PORTS) pdata->enabled_ports |= BIT(reg); } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 729b36eeb2c4..7fc2155d93d6 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -319,7 +319,7 @@ static const u16 ksz8795_regs[] = { [S_BROADCAST_CTRL] = 0x06, [S_MULTICAST_CTRL] = 0x04, [P_XMII_CTRL_0] = 0x06, - [P_XMII_CTRL_1] = 0x56, + [P_XMII_CTRL_1] = 0x06, }; static const u32 ksz8795_masks[] = { diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index a508402c4ecb..02410ac439b7 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -396,6 +396,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, /* Set up switch core clock for MT7530 */ static void mt7530_pll_setup(struct mt7530_priv *priv) { + /* Disable core clock */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); + /* Disable PLL */ core_write(priv, CORE_GSWPLL_GRP1, 0); @@ -409,14 +412,19 @@ static void mt7530_pll_setup(struct mt7530_priv *priv) RG_GSWPLL_EN_PRE | RG_GSWPLL_POSDIV_200M(2) | RG_GSWPLL_FBKDIV_200M(32)); + + udelay(20); + + /* Enable core clock */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN); } -/* Setup TX circuit including relevant PAD and driving */ +/* Setup port 6 interface mode and TRGMII TX circuit */ static int mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) { struct mt7530_priv *priv = ds->priv; - u32 ncpo1, ssc_delta, trgint, i, xtal; + u32 ncpo1, ssc_delta, trgint, xtal; xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK; @@ -430,11 +438,13 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) switch (interface) { case PHY_INTERFACE_MODE_RGMII: trgint = 0; - /* PLL frequency: 125MHz */ - ncpo1 = 0x0c80; break; case PHY_INTERFACE_MODE_TRGMII: trgint = 1; + if (xtal == HWTRAP_XTAL_25MHZ) + ssc_delta = 0x57; + else + ssc_delta = 0x87; if (priv->id == ID_MT7621) { /* PLL frequency: 150MHz: 1.2GBit */ if (xtal == HWTRAP_XTAL_40MHZ) @@ -454,46 +464,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) return -EINVAL; } - if (xtal == HWTRAP_XTAL_25MHZ) - ssc_delta = 0x57; - else - ssc_delta = 0x87; - mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(trgint)); - 
/* Lower Tx Driving for TRGMII path */ - for (i = 0 ; i < NUM_TRGMII_CTRL ; i++) - mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), - TD_DM_DRVP(8) | TD_DM_DRVN(8)); + if (trgint) { + /* Disable the MT7530 TRGMII clocks */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); + + /* Setup the MT7530 TRGMII Tx Clock */ + core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); + core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); + core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); + core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); + core_write(priv, CORE_PLL_GROUP4, + RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN | + RG_SYSPLL_BIAS_LPF_EN); + core_write(priv, CORE_PLL_GROUP2, + RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | + RG_SYSPLL_POSDIV(1)); + core_write(priv, CORE_PLL_GROUP7, + RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | + RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); + + /* Enable the MT7530 TRGMII clocks */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN); + } - /* Disable MT7530 core and TRGMII Tx clocks */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, - REG_GSWCK_EN | REG_TRGMIICK_EN); - - /* Setup the MT7530 TRGMII Tx Clock */ - core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); - core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); - core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); - core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); - core_write(priv, CORE_PLL_GROUP4, - RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN | - RG_SYSPLL_BIAS_LPF_EN); - core_write(priv, CORE_PLL_GROUP2, - RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | - RG_SYSPLL_POSDIV(1)); - core_write(priv, CORE_PLL_GROUP7, - RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | - RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); - - /* Enable MT7530 core and TRGMII Tx clocks */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, - REG_GSWCK_EN | REG_TRGMIICK_EN); - - if (!trgint) - for (i = 0 ; i < NUM_TRGMII_CTRL; i++) - mt7530_rmw(priv, MT7530_TRGMII_RD(i), - RD_TAP_MASK, RD_TAP(16)); return 0; } @@ -2201,7 +2197,16 @@ mt7530_setup(struct dsa_switch *ds) mt7530_pll_setup(priv); - /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */ + /* Lower Tx driving for TRGMII path */ + for (i = 0; i < NUM_TRGMII_CTRL; i++) + mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), + TD_DM_DRVP(8) | TD_DM_DRVN(8)); + + for (i = 0; i < NUM_TRGMII_CTRL; i++) + mt7530_rmw(priv, MT7530_TRGMII_RD(i), + RD_TAP_MASK, RD_TAP(16)); + + /* Enable port 6 */ val = mt7530_read(priv, MT7530_MHWTRAP); val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; val |= MHWTRAP_MANUAL; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 0a5d6c7bb128..30383c4f8fd0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port) return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; else if (chip->info->ops->set_max_frame_size) return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; - return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; + return ETH_DATA_LEN; } static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) @@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) struct mv88e6xxx_chip *chip = ds->priv; int ret = 0; + /* For families where we don't know how to alter the MTU, + * just accept any value up to ETH_DATA_LEN + */ + if (!chip->info->ops->port_set_jumbo_size && + !chip->info->ops->set_max_frame_size) { + if 
(new_mtu > ETH_DATA_LEN) + return -EINVAL; + + return 0; + } + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) new_mtu += EDSA_HLEN; @@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu); else if (chip->info->ops->set_max_frame_size) ret = chip->info->ops->set_max_frame_size(chip, new_mtu); - else - if (new_mtu > 1522) - ret = -EINVAL; mv88e6xxx_reg_unlock(chip); return ret; diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 8da79eedc057..1d4f2f4d10f2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -850,11 +850,20 @@ static int ena_set_channels(struct net_device *netdev, struct ena_adapter *adapter = netdev_priv(netdev); u32 count = channels->combined_count; /* The check for max value is already done in ethtool */ - if (count < ENA_MIN_NUM_IO_QUEUES || - (ena_xdp_present(adapter) && - !ena_xdp_legal_queue_count(adapter, count))) + if (count < ENA_MIN_NUM_IO_QUEUES) return -EINVAL; + if (!ena_xdp_legal_queue_count(adapter, count)) { + if (ena_xdp_present(adapter)) + return -EINVAL; + + xdp_clear_features_flag(netdev); + } else { + xdp_set_features_flag(netdev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT); + } + return ena_update_queue_count(adapter, count); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d3999db7c6a2..cbfe7f977270 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4105,8 +4105,6 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter, /* Set offload features */ ena_set_dev_offloads(feat, netdev); - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; - adapter->max_mtu = feat->dev_attr.max_mtu; netdev->max_mtu = adapter->max_mtu; netdev->min_mtu = ENA_MIN_MTU; @@ -4393,6 +4391,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_config_debug_area(adapter); + if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT; + memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 1e8d902e1c8e..7f933175cbda 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames, return num_frames - drop; } +static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp, + struct net_device *dev, + struct aq_ring_buff_s *buff) +{ + struct xdp_frame *xdpf; + struct sk_buff *skb; + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return NULL; + + skb = xdp_build_skb_from_frame(xdpf, dev); + if (!skb) + return NULL; + + aq_get_rxpages_xdp(buff, xdp); + return skb; +} + static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, struct xdp_buff *xdp, struct aq_ring_s *rx_ring, @@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, prog = READ_ONCE(rx_ring->xdp_prog); if (!prog) - goto pass; + return aq_xdp_build_skb(xdp, aq_nic->ndev, buff); prefetchw(xdp->data_hard_start); /* xdp_frame write */ @@ -442,17 +461,12 @@ static struct sk_buff 
*aq_xdp_run_prog(struct aq_nic_s *aq_nic, act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: -pass: - xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) - goto out_aborted; - skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff); if (!skb) goto out_aborted; u64_stats_update_begin(&rx_ring->stats.rx.syncp); ++rx_ring->stats.rx.xdp_pass; u64_stats_update_end(&rx_ring->stats.rx.syncp); - aq_get_rxpages_xdp(buff, xdp); return skb; case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 808236dc898b..e2e2c986c82b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6990,11 +6990,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; } - if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) { + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) - bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC; - } + if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index dcb09fbe4007..c0628ac1b798 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2000,6 +2000,8 @@ struct bnxt { u32 fw_dbg_cap; #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) +#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ + ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 4ec8bba18cdd..a3a3978a4d1c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -63,7 +63,7 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, ptp_info); u64 ns = timespec64_to_ns(ts); - if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC) + if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_cfg_settime(ptp->bp, ns); spin_lock_bh(&ptp->ptp_lock); @@ -196,7 +196,7 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC) + if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_adjphc(ptp, delta); spin_lock_bh(&ptp->ptp_lock); @@ -205,34 +205,39 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) return 0; } +static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm) +{ + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); + struct hwrm_port_mac_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(bp, req); + if (rc) + netdev_err(bp->dev, + "ptp adjfine failed. 
rc = %d\n", rc); + return rc; +} + static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; - int rc = 0; - if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) { - spin_lock_bh(&ptp->ptp_lock); - timecounter_read(&ptp->tc); - ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); - spin_unlock_bh(&ptp->ptp_lock); - } else { - s32 ppb = scaled_ppm_to_ppb(scaled_ppm); - - rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); - if (rc) - return rc; + if (BNXT_PTP_USE_RTC(bp)) + return bnxt_ptp_adjfine_rtc(bp, scaled_ppm); - req->ptp_freq_adj_ppb = cpu_to_le32(ppb); - req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_req_send(ptp->bp, req); - if (rc) - netdev_err(ptp->bp->dev, - "ptp adjfine failed. rc = %d\n", rc); - } - return rc; + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); + spin_unlock_bh(&ptp->ptp_lock); + return 0; } void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) @@ -879,7 +884,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) u64 ns; int rc; - if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) + if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp)) return -ENODEV; if (!phc_cfg) { @@ -932,13 +937,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { + if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); rc = bnxt_ptp_init_rtc(bp, phc_cfg); if (rc) goto out; } else { bnxt_ptp_timecounter_init(bp, true); + bnxt_ptp_adjfine_rtc(bp, 0); } ptp->ptp_info = bnxt_ptp_caps; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6e141a8bbf43..66e30561569e 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4990,7 +4990,7 @@ static int macb_probe(struct platform_device *pdev) bp->jumbo_max_len = macb_config->jumbo_max_len; bp->wol = 0; - if (of_get_property(np, "magic-packet", NULL)) + if (of_property_read_bool(np, "magic-packet")) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index e5c71f907852..d8d71bf97983 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -735,12 +735,17 @@ static int nicvf_set_channels(struct net_device *dev, if (channel->tx_count > nic->max_queues) return -EINVAL; - if (nic->xdp_prog && - ((channel->tx_count + channel->rx_count) > nic->max_queues)) { - netdev_err(nic->netdev, - "XDP mode, RXQs + TXQs > Max %d\n", - nic->max_queues); - return -EINVAL; + if (channel->tx_count + channel->rx_count > nic->max_queues) { + if (nic->xdp_prog) { + netdev_err(nic->netdev, + "XDP mode, RXQs + TXQs > Max %d\n", + nic->max_queues); + return -EINVAL; + } + + xdp_clear_features_flag(nic->netdev); + } else if (!pass1_silicon(nic->pdev)) { + xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC); } if (if_up) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 8b25313c7f6b..eff350e0bc2a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ 
b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2218,7 +2218,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC; + if (!pass1_silicon(nic->pdev) && + nic->rx_queues + nic->tx_queues <= nic->max_queues) + netdev->xdp_features = NETDEV_XDP_ACT_BASIC; /* MTU range: 64 - 9200 */ netdev->min_mtu = NIC_HW_MIN_FRS; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index b21e56de6167..05a89ab6766c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1393,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) if (!pdata) return ERR_PTR(-ENOMEM); - if (of_find_property(np, "davicom,ext-phy", NULL)) + if (of_property_read_bool(np, "davicom,ext-phy")) pdata->flags |= DM9000_PLATF_EXT_PHY; - if (of_find_property(np, "davicom,no-eeprom", NULL)) + if (of_property_read_bool(np, "davicom,no-eeprom")) pdata->flags |= DM9000_PLATF_NO_EEPROM; ret = of_get_mac_address(np, pdata->dev_addr); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index bca68edfbe9c..da9d4b310fcd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -370,8 +370,7 @@ static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = { }; static void enetc_rmon_stats(struct enetc_hw *hw, int mac, - struct ethtool_rmon_stats *s, - const struct ethtool_rmon_hist_range **ranges) + struct ethtool_rmon_stats *s) { s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac)); s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac)); @@ -393,8 +392,6 @@ static void enetc_rmon_stats(struct enetc_hw *hw, int mac, s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac)); s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac)); s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac)); - - *ranges = enetc_rmon_ranges; } static void enetc_get_eth_mac_stats(struct net_device *ndev, @@ -447,13 +444,15 @@ static void enetc_get_rmon_stats(struct net_device *ndev, struct enetc_hw *hw = &priv->si->hw; struct enetc_si *si = priv->si; + *ranges = enetc_rmon_ranges; + switch (rmon_stats->src) { case ETHTOOL_MAC_STATS_SRC_EMAC: - enetc_rmon_stats(hw, 0, rmon_stats, ranges); + enetc_rmon_stats(hw, 0, rmon_stats); break; case ETHTOOL_MAC_STATS_SRC_PMAC: if (si->hw_features & ENETC_SI_F_QBU) - enetc_rmon_stats(hw, 1, rmon_stats, ranges); + enetc_rmon_stats(hw, 1, rmon_stats); break; case ETHTOOL_MAC_STATS_SRC_AGGREGATE: ethtool_aggregate_rmon_stats(ndev, rmon_stats); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c73e25f8995e..f3b16a6673e2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -4251,7 +4251,7 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_ipc_init; - if (of_get_property(np, "fsl,magic-packet", NULL)) + if (of_property_read_bool(np, "fsl,magic-packet")) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; ret = fec_enet_init_stop_mode(fep, np); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index a7f4c3c29f3e..b88816b71ddf 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -937,7 +937,7 @@ static int 
mpc52xx_fec_probe(struct platform_device *op) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* the 7-wire property means don't use MII mode */ - if (of_find_property(np, "fsl,7-wire-mode", NULL)) { + if (of_property_read_bool(np, "fsl,7-wire-mode")) { priv->seven_wire_mode = 1; dev_info(&ndev->dev, "using 7-wire PHY mode\n"); } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b2def295523a..38d5013c6fed 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -787,10 +787,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) else priv->interface = gfar_get_interface(dev); - if (of_find_property(np, "fsl,magic-packet", NULL)) + if (of_property_read_bool(np, "fsl,magic-packet")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; - if (of_get_property(np, "fsl,wake-on-filer", NULL)) + if (of_property_read_bool(np, "fsl,wake-on-filer")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index ce574d097e28..5f81470843b4 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct gve_priv *priv = netdev_priv(netdev); - int err = gve_adminq_report_link_speed(priv); + int err = 0; + + if (priv->link_speed == 0) + err = gve_adminq_report_link_speed(priv); cmd->base.speed = priv->link_speed; return err; diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index daec9ce04531..54bb4d9a0d1e 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev) void __iomem *mpu_addr; void __iomem *ca_addr; u8 __iomem *eth_addr; + u8 mac[ETH_ALEN]; res = platform_get_resource(dev, IORESOURCE_MEM, 0); ca = platform_get_resource(dev, IORESOURCE_MEM, 1); @@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev) goto probe_failed; /* someone seems to like messed up stuff */ - netdevice->dev_addr[0] = readb(eth_addr + 0x0b); - netdevice->dev_addr[1] = readb(eth_addr + 0x0a); - netdevice->dev_addr[2] = readb(eth_addr + 0x09); - netdevice->dev_addr[3] = readb(eth_addr + 0x08); - netdevice->dev_addr[4] = readb(eth_addr + 0x07); - netdevice->dev_addr[5] = readb(eth_addr + 0x06); + mac[0] = readb(eth_addr + 0x0b); + mac[1] = readb(eth_addr + 0x0a); + mac[2] = readb(eth_addr + 0x09); + mac[3] = readb(eth_addr + 0x08); + mac[4] = readb(eth_addr + 0x07); + mac[5] = readb(eth_addr + 0x06); + eth_hw_addr_set(netdevice, mac); iounmap(eth_addr); if (netdevice->irq < 0) { diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 9b08e41ccc29..c97095abd26a 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2939,9 +2939,9 @@ static int emac_init_config(struct emac_instance *dev) } /* Fixup some feature bits based on the device tree */ - if (of_get_property(np, "has-inverted-stacr-oc", NULL)) + if (of_property_read_bool(np, "has-inverted-stacr-oc")) dev->features |= EMAC_FTR_STACR_OC_INVERT; - if (of_get_property(np, "has-new-stacr-staopc", NULL)) + if (of_property_read_bool(np, 
"has-new-stacr-staopc")) dev->features |= EMAC_FTR_HAS_NEW_STACR; /* CAB lacks the appropriate properties */ @@ -3042,7 +3042,7 @@ static int emac_probe(struct platform_device *ofdev) * property here for now, but new flat device trees should set a * status property to "disabled" instead. */ - if (of_get_property(np, "unused", NULL) || !of_device_is_available(np)) + if (of_property_read_bool(np, "unused") || !of_device_is_available(np)) return -ENODEV; /* Find ourselves in the bootlist if we are there */ @@ -3333,7 +3333,7 @@ static void __init emac_make_bootlist(void) if (of_match_node(emac_match, np) == NULL) continue; - if (of_get_property(np, "unused", NULL)) + if (of_property_read_bool(np, "unused")) continue; idx = of_get_property(np, "cell-index", NULL); if (idx == NULL) diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 242ef976fd15..50358cf00130 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -242,7 +242,7 @@ static int rgmii_probe(struct platform_device *ofdev) } /* Check for RGMII flags */ - if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL)) + if (of_property_read_bool(ofdev->dev.of_node, "has-mdio")) dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; /* CAB lacks the right properties, fix this up */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 467001db5070..228cd502bb48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15525,6 +15525,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) int err; int v_idx; + pci_set_drvdata(pf->pdev, pf); pci_save_state(pf->pdev); /* set up periodic task facility */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 924f972b91fa..72b091f2509d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto, struct i40e_fdir_filter *data) { bool is_vlan = !!data->vlan_tag; - struct vlan_hdr vlan; - struct ipv6hdr ipv6; - struct ethhdr eth; - struct iphdr ip; + struct vlan_hdr vlan = {}; + struct ipv6hdr ipv6 = {}; + struct ethhdr eth = {}; + struct iphdr ip = {}; u8 *tmp; if (ipv4) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 16c490965b61..dd11dbbd5551 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = { /* Non Tunneled IPv6 */ IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), IAVF_PTT_UNUSED_ENTRY(91), IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 3273aeb8fa67..095201e83c9d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); + /* Do not track VLAN 0 filter, always 
added by the PF on VF init */ + if (!vid) + return 0; + if (!VLAN_FILTERING_ALLOWED(adapter)) return -EIO; @@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); + /* We do not track VLAN 0 filter */ + if (!vid) + return 0; + iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))); if (proto == cpu_to_be16(ETH_P_8021Q)) clear_bit(vid, adapter->vsi.active_cvlans); @@ -5066,6 +5074,11 @@ static void iavf_remove(struct pci_dev *pdev) mutex_unlock(&adapter->crit_lock); break; } + /* Simply return if we already went through iavf_shutdown */ + if (adapter->state == __IAVF_REMOVE) { + mutex_unlock(&adapter->crit_lock); + return; + } mutex_unlock(&adapter->crit_lock); usleep_range(500, 1000); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 18b6a702a1d6..e989feda133c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring, cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 6d23338604bb..4e17d006c52d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -2446,8 +2446,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->is_new_vlan) { f->is_new_vlan = false; - if (!f->vlan.vid) - continue; if (f->vlan.tpid == ETH_P_8021Q) set_bit(f->vlan.vid, adapter->vsi.active_cvlans); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b0e29e342401..e809249500e1 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -509,6 +509,7 @@ enum ice_pf_flags { ICE_FLAG_VF_VLAN_PRUNING, ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_UNPLUG_AUX_DEV, ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_PF_FLAGS_NBITS /* must be last */ @@ -955,16 +956,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) */ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { - /* We can directly unplug aux device here only if the flag bit - * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev() - * could race with ice_plug_aux_dev() called from - * ice_service_task(). In this case we only clear that bit now and - * aux device will be unplugged later once ice_plug_aux_device() - * called from ice_service_task() finishes (see ice_service_task()). 
+ /* defer unplug to service task to avoid RTNL lock and + * clear PLUG bit so that pending plugs don't interfere */ - if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) - ice_unplug_aux_dev(pf); - + clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); + set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 0f52ea38b6f3..450317dfcca7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -291,6 +291,7 @@ static void ice_vsi_delete_from_hw(struct ice_vsi *vsi) struct ice_vsi_ctx *ctxt; int status; + ice_fltr_remove_all(vsi); ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) return; @@ -2892,7 +2893,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi) !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) ice_cfg_sw_lldp(vsi, false, false); - ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); if (err) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 567694bf098b..0d8b8c6f9bd3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work) } } - if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { - /* Plug aux device per request */ - ice_plug_aux_dev(pf); + /* unplug aux dev per request, if an unplug request came in + * while processing a plug request, this will handle it + */ + if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); - /* Mark plugging as done but check whether unplug was - * requested during ice_plug_aux_dev() call - * (e.g. from ice_clear_rdma_cap()) and if so then - * plug aux device. 
- */ - if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) - ice_unplug_aux_dev(pf); - } + /* Plug aux device per request */ + if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_plug_aux_dev(pf); if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { struct iidc_event *event; @@ -4644,6 +4641,12 @@ static int ice_start_eth(struct ice_vsi *vsi) return err; } +static void ice_stop_eth(struct ice_vsi *vsi) +{ + ice_fltr_remove_all(vsi); + ice_vsi_close(vsi); +} + static int ice_init_eth(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); @@ -5132,7 +5135,7 @@ void ice_unload(struct ice_pf *pf) { ice_deinit_features(pf); ice_deinit_rdma(pf); - ice_vsi_close(ice_get_main_vsi(pf)); + ice_stop_eth(ice_get_main_vsi(pf)); ice_vsi_decfg(ice_get_main_vsi(pf)); ice_deinit_dev(pf); } diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 96a64c25e2ef..0cc05e54a781 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1341,15 +1341,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) struct ice_vf *vf; int ret; + vf = ice_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + if (ice_is_eswitch_mode_switchdev(pf)) { dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n"); return -EOPNOTSUPP; } - vf = ice_get_vf_by_id(pf, vf_id); - if (!vf) - return -EINVAL; - ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index dfd22862e926..b61dd9f01540 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1210,6 +1210,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc); if (++ntc == cnt) ntc = 0; + rx_ring->first_desc = ntc; continue; } diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 31565bbafa22..d1e489da7363 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) } netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); - ice_qvec_dis_irq(vsi, rx_ring, q_vector); - ice_fill_txq_meta(vsi, tx_ring, &txq_meta); err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); if (err) @@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; } + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); if (err) return err; - ice_clean_rx_ring(rx_ring); ice_qvec_toggle_napi(vsi, q_vector, false); ice_qp_clean_rings(vsi, q_idx); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 03bc1e8af575..274c781b5547 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -109,6 +109,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void igb_remove(struct pci_dev *pdev); +static void igb_init_queue_configuration(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); int igb_open(struct net_device *); int igb_close(struct net_device *); @@ -175,9 +176,7 @@ static void igb_nfc_filter_restore(struct 
igb_adapter *adapter); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); -static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); -static int igb_disable_sriov(struct pci_dev *dev); -static int igb_pci_disable_sriov(struct pci_dev *dev); +static int igb_disable_sriov(struct pci_dev *dev, bool reinit); #endif static int igb_suspend(struct device *); @@ -3665,7 +3664,7 @@ err_sw_init: kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PCI_IOV - igb_disable_sriov(pdev); + igb_disable_sriov(pdev, false); #endif pci_iounmap(pdev, adapter->io_addr); err_ioremap: @@ -3679,7 +3678,38 @@ err_dma: } #ifdef CONFIG_PCI_IOV -static int igb_disable_sriov(struct pci_dev *pdev) +static int igb_sriov_reinit(struct pci_dev *dev) +{ + struct net_device *netdev = pci_get_drvdata(dev); + struct igb_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + + rtnl_lock(); + + if (netif_running(netdev)) + igb_close(netdev); + else + igb_reset(adapter); + + igb_clear_interrupt_scheme(adapter); + + igb_init_queue_configuration(adapter); + + if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + igb_open(netdev); + + rtnl_unlock(); + + return 0; +} + +static int igb_disable_sriov(struct pci_dev *pdev, bool reinit) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -3713,10 +3743,10 @@ static int igb_disable_sriov(struct pci_dev *pdev) adapter->flags |= IGB_FLAG_DMAC; } - return 0; + return reinit ? igb_sriov_reinit(pdev) : 0; } -static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) +static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit) { struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); @@ -3781,12 +3811,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) "Unable to allocate memory for VF MAC filter list\n"); } - /* only call pci_enable_sriov() if no VFs are allocated already */ - if (!old_vfs) { - err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); - if (err) - goto err_out; - } dev_info(&pdev->dev, "%d VFs allocated\n", adapter->vfs_allocated_count); for (i = 0; i < adapter->vfs_allocated_count; i++) @@ -3794,6 +3818,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs) /* DMA Coalescing is not supported in IOV mode. 
*/ adapter->flags &= ~IGB_FLAG_DMAC; + + if (reinit) { + err = igb_sriov_reinit(pdev); + if (err) + goto err_out; + } + + /* only call pci_enable_sriov() if no VFs are allocated already */ + if (!old_vfs) + err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); + goto out; err_out: @@ -3863,9 +3898,7 @@ static void igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); #ifdef CONFIG_PCI_IOV - rtnl_lock(); - igb_disable_sriov(pdev); - rtnl_unlock(); + igb_disable_sriov(pdev, false); #endif unregister_netdev(netdev); @@ -3911,7 +3944,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) igb_reset_interrupt_capability(adapter); pci_sriov_set_totalvfs(pdev, 7); - igb_enable_sriov(pdev, max_vfs); + igb_enable_sriov(pdev, max_vfs, false); #endif /* CONFIG_PCI_IOV */ } @@ -9520,71 +9553,17 @@ static void igb_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PCI_IOV -static int igb_sriov_reinit(struct pci_dev *dev) -{ - struct net_device *netdev = pci_get_drvdata(dev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct pci_dev *pdev = adapter->pdev; - - rtnl_lock(); - - if (netif_running(netdev)) - igb_close(netdev); - else - igb_reset(adapter); - - igb_clear_interrupt_scheme(adapter); - - igb_init_queue_configuration(adapter); - - if (igb_init_interrupt_scheme(adapter, true)) { - rtnl_unlock(); - dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - return -ENOMEM; - } - - if (netif_running(netdev)) - igb_open(netdev); - - rtnl_unlock(); - - return 0; -} - -static int igb_pci_disable_sriov(struct pci_dev *dev) -{ - int err = igb_disable_sriov(dev); - - if (!err) - err = igb_sriov_reinit(dev); - - return err; -} - -static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs) -{ - int err = igb_enable_sriov(dev, num_vfs); - - if (err) - goto out; - - err = igb_sriov_reinit(dev); - if (!err) - return num_vfs; - -out: - return err; -} - -#endif static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV - if (num_vfs == 0) - return igb_pci_disable_sriov(dev); - else - return igb_pci_enable_sriov(dev, num_vfs); + int err; + + if (num_vfs == 0) { + return igb_disable_sriov(dev, true); + } else { + err = igb_enable_sriov(dev, num_vfs, true); + return err ? 
err : num_vfs; + } #endif return 0; } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 3a32809510fc..72cb1b56e9f2 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) igbvf_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) - goto out; + goto free_irq_tx; adapter->rx_ring->itr_register = E1000_EITR(vector); adapter->rx_ring->itr_val = adapter->current_itr; @@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, igbvf_msix_other, 0, netdev->name, netdev); if (err) - goto out; + goto free_irq_rx; igbvf_configure_msix(adapter); return 0; +free_irq_rx: + free_irq(adapter->msix_entries[--vector].vector, netdev); +free_irq_tx: + free_irq(adapter->msix_entries[--vector].vector, netdev); out: return err; } diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index b8ba3f94c363..a47a2e3e548c 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ +#include <linux/etherdevice.h> + #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); @@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | - E1000_VT_MSGTYPE_ACK)) + switch (msgbuf[0]) { + case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK: memcpy(hw->mac.perm_addr, addr, ETH_ALEN); - else + break; + case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK: + eth_zero_addr(hw->mac.perm_addr); + break; + default: ret_val = -E1000_ERR_MAC_INIT; + } } } diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 2928a6c73692..25fc6c65209b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6010,18 +6010,18 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->command != TC_TAPRIO_CMD_SET_GATES) return false; - for (i = 0; i < adapter->num_tx_queues; i++) { - if (e->gate_mask & BIT(i)) + for (i = 0; i < adapter->num_tx_queues; i++) + if (e->gate_mask & BIT(i)) { queue_uses[i]++; - /* There are limitations: A single queue cannot be - * opened and closed multiple times per cycle unless the - * gate stays open. Check for it. - */ - if (queue_uses[i] > 1 && - !(prev->gate_mask & BIT(i))) - return false; - } + /* There are limitations: A single queue cannot + * be opened and closed multiple times per cycle + * unless the gate stays open. Check for it. 
+ */ + if (queue_uses[i] > 1 && + !(prev->gate_mask & BIT(i))) + return false; + } } return true; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9b4ecbe4f36d..3ea00bc9b91c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4996,6 +4996,14 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; + if (percpu && port->ntxqs >= num_possible_cpus() * 2) + xdp_set_features_flag(port->dev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); + else + xdp_clear_features_flag(port->dev); + mvpp2_swf_bm_pool_init(port); if (status[i]) mvpp2_open(port->dev); @@ -6863,13 +6871,14 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!port->priv->percpu_pools) mvpp2_set_hw_csum(port, port->pool_long->id); + else if (port->ntxqs >= num_possible_cpus() * 2) + dev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; dev->vlan_features |= features; netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); - dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; - dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 7f8ffbf79cf7..ab126f8706c7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -709,6 +709,7 @@ err_unreg_netdev: err_ptp_destroy: otx2_ptp_destroy(vf); err_detach_rsrc: + free_percpu(vf->hw.lmt_info); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); @@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev) otx2_shutdown_tc(vf); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); + free_percpu(vf->hw.lmt_info); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index b65de174c3d9..084a6badef6d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -542,6 +542,10 @@ #define SGMII_SEND_AN_ERROR_EN BIT(11) #define SGMII_IF_MODE_MASK GENMASK(5, 1) +/* Register to reset SGMII design */ +#define SGMII_RESERVED_0 0x34 +#define SGMII_SW_RESET BIT(0) + /* Register to set SGMII speed, ANA RG_ Control Signals III*/ #define SGMSYS_ANA_RG_CS3 0x2028 #define RG_PHY_SPEED_MASK (BIT(2) | BIT(3)) diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index bb00de1003ac..83976dc86887 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -38,20 +38,16 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, const unsigned long *advertising, bool permit_pause_to_mac) { + bool mode_changed = false, changed, use_an; struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); unsigned int rgc3, sgm_mode, bmcr; int advertise, link_timer; - bool changed, use_an; advertise = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); if (advertise < 0) return advertise; - link_timer = phylink_get_link_timer_ns(interface); - if (link_timer < 0) - return link_timer; - /* Clearing 
IF_MODE_BIT0 switches the PCS to BASE-X mode, and * we assume that fixes it's speed at bitrate = line rate (in * other words, 1000Mbps or 2500Mbps). @@ -77,17 +73,24 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, } if (use_an) { - /* FIXME: Do we need to set AN_RESTART here? */ - bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE; + bmcr = SGMII_AN_ENABLE; } else { bmcr = 0; } if (mpcs->interface != interface) { + link_timer = phylink_get_link_timer_ns(interface); + if (link_timer < 0) + return link_timer; + /* PHYA power down */ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, SGMII_PHYA_PWD, SGMII_PHYA_PWD); + /* Reset SGMII PCS state */ + regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0, + SGMII_SW_RESET, SGMII_SW_RESET); + if (interface == PHY_INTERFACE_MODE_2500BASEX) rgc3 = RG_PHY_SPEED_3_125G; else @@ -97,16 +100,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3, RG_PHY_SPEED_3_125G, rgc3); + /* Setup the link timer */ + regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8); + mpcs->interface = interface; + mode_changed = true; } /* Update the advertisement, noting whether it has changed */ regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE, SGMII_ADVERTISE, advertise, &changed); - /* Setup the link timer and QPHY power up inside SGMIISYS */ - regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8); - /* Update the sgmsys mode register */ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN | @@ -114,7 +118,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, /* Update the BMCR */ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, - SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr); + SGMII_AN_ENABLE, bmcr); /* Release PHYA power down state * Only removing bit SGMII_PHYA_PWD isn't enough. 
@@ -128,7 +132,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, usleep_range(50, 100); regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0); - return changed; + return changed || mode_changed; } static void mtk_pcs_restart_an(struct phylink_pcs *pcs) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 0869d4fff17b..4b5e459b6d49 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -674,7 +674,7 @@ int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) struct mlx4_en_xdp_buff *_ctx = (void *)ctx; if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL)) - return -EOPNOTSUPP; + return -ENODATA; *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev, mlx4_en_get_cqe_ts(_ctx->cqe)); @@ -686,7 +686,7 @@ int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) struct mlx4_en_xdp_buff *_ctx = (void *)ctx; if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH))) - return -EOPNOTSUPP; + return -ENODATA; *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 88460b7796e5..4a19ef4a9811 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -313,7 +313,6 @@ struct mlx5e_params { } channel; } mqprio; bool rx_cqe_compress_def; - bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; struct dim_cq_moder tx_cq_moderation; struct mlx5e_packet_merge_param packet_merge; @@ -1243,6 +1242,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); +void mlx5e_set_xdp_feature(struct net_device *netdev); netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c index c4378afdec09..1bd1c94fb977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c @@ -178,7 +178,6 @@ tc_act_police_stats(struct mlx5e_priv *priv, meter = mlx5e_tc_meter_get(priv->mdev, ¶ms); if (IS_ERR(meter)) { NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter"); - mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index); return PTR_ERR(meter); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c index 626cb7470fa5..07c1895a2b23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c @@ -64,6 +64,7 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle, { struct mlx5e_tc_act_stats *act_stats, *old_act_stats; struct rhashtable *ht = &handle->ht; + u64 lastused; int err = 0; act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL); @@ -73,6 +74,10 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle, act_stats->tc_act_cookie = act_cookie; act_stats->counter = counter; + mlx5_fc_query_cached_raw(counter, + &act_stats->lastbytes, + &act_stats->lastpackets, &lastused); + rcu_read_lock(); old_act_stats = rhashtable_lookup_get_insert_fast(ht, &act_stats->hash, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index bcd6370de440..c5dae48b7932 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -162,7 +162,7 @@ static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) const struct mlx5e_xdp_buff *_ctx = (void *)ctx; if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp))) - return -EOPNOTSUPP; + return -ENODATA; *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time, _ctx->rq->clock, get_cqe_ts(_ctx->cqe)); @@ -174,7 +174,7 @@ static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) const struct mlx5e_xdp_buff *_ctx = (void *)ctx; if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))) - return -EOPNOTSUPP; + return -ENODATA; *hash = be32_to_cpu(_ctx->cqe->rss_hash_result); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 4be770443b0c..9b597cb24598 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -621,15 +621,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (unlikely(!priv_rx)) return -ENOMEM; - dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); - if (IS_ERR(dek)) { - err = PTR_ERR(dek); - goto err_create_key; - } - priv_rx->dek = dek; - - INIT_LIST_HEAD(&priv_rx->list); - spin_lock_init(&priv_rx->lock); switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: priv_rx->crypto_info.crypto_info_128 = @@ -642,9 +633,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, default: WARN_ONCE(1, "Unsupported cipher type %u\n", crypto_info->cipher_type); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_cipher_type; } + dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); + if (IS_ERR(dek)) { + err = PTR_ERR(dek); + goto err_cipher_type; + } + priv_rx->dek = dek; + + INIT_LIST_HEAD(&priv_rx->list); + spin_lock_init(&priv_rx->lock); + rxq = mlx5e_ktls_sk_get_rxq(sk); priv_rx->rxq = rxq; priv_rx->sk = sk; @@ -677,7 +679,7 @@ err_post_wqes: mlx5e_tir_destroy(&priv_rx->tir); err_create_tir: mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek); -err_create_key: +err_cipher_type: kfree(priv_rx); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 60b3e08a1028..0e4c0a093293 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -469,14 +469,6 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, if (IS_ERR(priv_tx)) return PTR_ERR(priv_tx); - dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); - if (IS_ERR(dek)) { - err = PTR_ERR(dek); - goto err_create_key; - } - priv_tx->dek = dek; - - priv_tx->expected_seq = start_offload_tcp_sn; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: priv_tx->crypto_info.crypto_info_128 = @@ -489,8 +481,18 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, default: WARN_ONCE(1, "Unsupported cipher type %u\n", crypto_info->cipher_type); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_pool_push; } + + dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); + if (IS_ERR(dek)) { + err = PTR_ERR(dek); + goto err_pool_push; + } + + priv_tx->dek = dek; + priv_tx->expected_seq = start_offload_tcp_sn; priv_tx->tx_ctx = 
tls_offload_ctx_tx(tls_ctx); mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx); @@ -500,7 +502,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, return 0; -err_create_key: +err_pool_push: pool_push(pool, priv_tx); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 08d0929e8260..33b3620ea45c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc { }; struct mlx5e_macsec_umr { + u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)]; dma_addr_t dma_addr; - u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)]; u32 mkey; }; @@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac struct mlx5e_macsec_aso *aso; struct mlx5_aso_wqe *aso_wqe; struct mlx5_aso *maso; + unsigned long expires; int err; aso = &macsec->aso; @@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL); mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl); - err = mlx5_aso_poll_cq(maso, false); + expires = jiffies + msecs_to_jiffies(10); + do { + err = mlx5_aso_poll_cq(maso, false); + if (err) + usleep_range(2, 10); + } while (err && time_is_after_jiffies(expires)); + if (err) goto err_out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 2449731b7d79..89de92d06483 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, ets)) return -EOPNOTSUPP; - ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; - for (i = 0; i < ets->ets_cap; i++) { + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); if (err) return err; + } + ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 7708acc9b2ab..79fd21ecb9cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1985,6 +1985,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params new_params; + int err; if (enable) { /* Checking the regular RQ here; mlx5e_validate_xsk_param called @@ -2005,7 +2006,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); mlx5e_set_rq_type(mdev, &new_params); - return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + if (err) + return err; + + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + + return 0; } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 76a9c5194a70..7ca7e9b57607 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4004,6 +4004,25 @@ static int mlx5e_handle_feature(struct net_device *netdev, return 0; } +void mlx5e_set_xdp_feature(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params *params = &priv->channels.params; + xdp_features_t val; + + if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + xdp_clear_features_flag(netdev); + return; + } + + val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_NDO_XMIT; + if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) + val |= NETDEV_XDP_ACT_RX_SG; + xdp_set_features_flag(netdev, val); +} + int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = features; @@ -4030,6 +4049,9 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) return -EINVAL; } + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + return 0; } @@ -4128,8 +4150,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, } } - if (mlx5e_is_uplink_rep(priv)) + if (mlx5e_is_uplink_rep(priv)) { features = mlx5e_fix_uplink_rep_features(netdev, features); + features |= NETIF_F_NETNS_LOCAL; + } else { + features &= ~NETIF_F_NETNS_LOCAL; + } mutex_unlock(&priv->state_lock); @@ -4147,13 +4173,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, struct xsk_buff_pool *xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix); struct mlx5e_xsk_param xsk; + int max_xdp_mtu; if (!xsk_pool) continue; mlx5e_build_xsk_param(xsk_pool, &xsk); + max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk); - if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) { + /* Validate XSK params and XDP MTU in advance */ + if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) || + new_params->sw_mtu > max_xdp_mtu) { u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk); int max_mtu_frame, max_mtu_page, max_mtu; @@ -4163,9 +4193,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, */ max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr); max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0)); - max_mtu = min(max_mtu_frame, max_mtu_page); + max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu); - netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n", + netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. 
Try MTU <= %d\n", new_params->sw_mtu, ix, max_mtu); return false; } @@ -4761,13 +4791,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) if (old_prog) bpf_prog_put(old_prog); - if (reset) { - if (prog) - xdp_features_set_redirect_target(netdev, true); - else - xdp_features_clear_redirect_target(netdev); - } - if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) goto unlock; @@ -4964,8 +4987,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* TX inline */ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev); - /* AF_XDP */ params->xsk = xsk; @@ -5163,13 +5184,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_XSK_ZEROCOPY | - NETDEV_XDP_ACT_RX_SG; - netdev->priv_flags |= IFF_UNICAST_FLT; netif_set_tso_max_size(netdev, GSO_MAX_SIZE); + mlx5e_set_xdp_feature(netdev); mlx5e_set_netdev_dev_addr(netdev); mlx5e_macsec_build_netdev(priv); mlx5e_ipsec_build_netdev(priv); @@ -5241,6 +5259,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_health_create_reporters(priv); + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + return 0; } @@ -5270,7 +5291,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) } features = MLX5E_RX_RES_FEATURE_PTP; - if (priv->channels.params.tunneled_offload_en) + if (mlx5_tunnel_inner_ft_supported(mdev)) features |= MLX5E_RX_RES_FEATURE_INNER_FT; err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, priv->max_nch, priv->drop_rq.rqn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 9b9203443085..8ff654b4e9e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -747,12 +747,14 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* RQ */ mlx5e_build_rq_params(mdev, params); + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->mqprio.num_tc = 1; - params->tunneled_offload_en = false; if (rep->vport != MLX5_VPORT_UPLINK) params->vlan_strip_disable = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 70b8d2dfa751..87a2850b32d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1103,8 +1103,8 @@ static void mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params, struct mlx5_core_dev *mdev) { + u32 link_speed = 0; u64 link_speed64; - u32 link_speed; hairpin_params->mdev = mdev; /* set hairpin pair per each 50Gbs share of the link */ @@ -3752,7 +3752,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, parse_attr->filter_dev = attr->parse_attr->filter_dev; attr2->action = 0; attr2->counter = NULL; - attr->tc_act_cookies_count = 0; + attr2->tc_act_cookies_count = 0; attr2->flags = 0; attr2->parse_attr = parse_attr; attr2->dest_chain = 0; @@ -4304,6 +4304,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, esw_attr->dest_int_port = dest_int_port; 
esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; + esw_attr->split_count = out_index; /* Forward to root fdb for matching against the new source vport */ attr->dest_chain = 0; @@ -5304,8 +5305,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs)); tc->action_stats_handle = mlx5e_tc_act_stats_create(); - if (IS_ERR(tc->action_stats_handle)) + if (IS_ERR(tc->action_stats_handle)) { + err = PTR_ERR(tc->action_stats_handle); goto err_act_stats; + } return 0; @@ -5440,8 +5443,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) } uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create(); - if (IS_ERR(uplink_priv->action_stats_handle)) + if (IS_ERR(uplink_priv->action_stats_handle)) { + err = PTR_ERR(uplink_priv->action_stats_handle); goto err_action_counter; + } return 0; @@ -5463,6 +5468,16 @@ err_tun_mapping: void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) { + struct mlx5e_rep_priv *rpriv; + struct mlx5_eswitch *esw; + struct mlx5e_priv *priv; + + rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); + priv = netdev_priv(rpriv->netdev); + esw = priv->mdev->priv.eswitch; + + mlx5e_tc_clean_fdb_peer_flows(esw); + mlx5e_tc_tun_cleanup(uplink_priv->encap); mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index d55775627a47..50d2ea323979 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vpo if (WARN_ON_ONCE(IS_ERR(vport))) { esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); - err = PTR_ERR(vport); - goto out; + return PTR_ERR(vport); } esw_acl_ingress_ofld_rules_destroy(esw, vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 0f052513fefa..8bdf28762f41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -959,6 +959,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; + esw_apply_vport_rx_mode(esw, vport, false, false); esw_vport_cleanup(esw, vport); esw->enabled_vports--; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d766a64b1823..25a8076a77bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -723,11 +723,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; for (i = 0; i < esw_attr->split_count; i++) { - if (esw_is_indir_table(esw, attr)) - err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i); - else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) - err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr, - &i); + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + /* Source port rewrite (forward to ovs internal port or statck device) isn't + * supported in the rule of split action. 
+ */ + err = -EOPNOTSUPP; else esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); @@ -3405,6 +3405,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } +static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink) +{ + struct net *devl_net, *netdev_net; + struct mlx5_eswitch *esw; + + esw = mlx5_devlink_eswitch_get(devlink); + netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev); + devl_net = devlink_net(devlink); + + return net_eq(devl_net, netdev_net); +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { @@ -3419,6 +3431,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && + !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) { + NL_SET_ERR_MSG_MOD(extack, + "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's."); + return -EPERM; + } + mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index c2a4f86bc890..baa7ef812313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; - params->tunneled_offload_en = false; /* CQE compression is not supported for IPoIB */ params->rx_cqe_compress_def = false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 540840e80493..f1de152a6113 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1364,8 +1364,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev) { mlx5_devlink_traps_unregister(priv_to_devlink(dev)); mlx5_sf_dev_table_destroy(dev); - mlx5_sriov_detach(dev); mlx5_eswitch_disable(dev->priv.eswitch); + mlx5_sriov_detach(dev); mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); mlx5_sf_hw_table_destroy(dev); @@ -1789,11 +1789,11 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); + set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain * fw_reset before unregistering the devlink. */ mlx5_drain_fw_reset(dev); - set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_crdump_disable(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 64d4e7125e9b..95dc67fb3001 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct return func_id <= mlx5_core_max_vfs(dev) ? 
MLX5_VF : MLX5_SF; } +static u32 mlx5_get_ec_function(u32 function) +{ + return function >> 16; +} + +static u32 mlx5_get_func_id(u32 function) +{ + return function & 0xffff; +} + static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) { struct rb_root *root; @@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void) } static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, - struct rb_root *root, u16 func_id) + struct rb_root *root, u32 function) { u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES)); unsigned long end = jiffies + recl_pages_to_jiffies; while (!RB_EMPTY_ROOT(root)) { + u32 ec_function = mlx5_get_ec_function(function); + u32 function_id = mlx5_get_func_id(function); int nclaimed; int err; - err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), - &nclaimed, false, mlx5_core_is_ecpf(dev)); + err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(), + &nclaimed, false, ec_function); if (err) { - mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", - err, func_id); + mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n", + err, function_id, ec_function); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index c5240d38c9db..09ed6e5fa6c3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -105,7 +105,6 @@ struct mlxsw_thermal { struct thermal_zone_device *tzdev; int polling_delay; struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; - u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_thermal_area line_cards[]; @@ -468,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev, return idx; /* Normalize the state to the valid speed range. */ - state = thermal->cooling_levels[state]; + state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state); mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state)); err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { @@ -859,10 +858,6 @@ int mlxsw_thermal_init(struct mlxsw_core *core, } } - /* Initialize cooling levels per PWM state. */ - for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++) - thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i); - thermal->polling_delay = bus_info->low_frequency ? 
MLXSW_THERMAL_SLOW_POLL_INT : MLXSW_THERMAL_POLL_INT; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a8f94b7544ee..02a327744a61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2937,6 +2937,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) { + refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0); mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; mutex_init(&mlxsw_sp->parsing.lock); @@ -2945,6 +2946,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) { mutex_destroy(&mlxsw_sp->parsing.lock); + WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref)); } struct mlxsw_sp_ipv6_addr_node { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 045a24cacfa5..b6ee2d658b0c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; int err; /* In case there are no {Port, VID} => FID mappings on the port, @@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u8 local_port = mlxsw_sp_port->local_port; + u16 local_port = mlxsw_sp_port->local_port; mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid); mlxsw_sp_fid_evid_map(fid, local_port, vid, false); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 09e32778b012..4a73e2fe95ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -10381,11 +10381,23 @@ err_reg_write: old_inc_parsing_depth); return err; } + +static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp) +{ + bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth; + + mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth, + false); +} #else static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { return 0; } + +static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp) +{ +} #endif static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) @@ -10615,6 +10627,7 @@ err_register_inet6addr_notifier: err_register_inetaddr_notifier: mlxsw_core_flush_owq(); err_dscp_init: + mlxsw_sp_mp_hash_fini(mlxsw_sp); err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: @@ -10655,6 +10668,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mlxsw_core_flush_owq(); + mlxsw_sp_mp_hash_fini(mlxsw_sp); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_lb_rif_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index bdb893476832..d0e6cd8dbe5c 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ 
b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -258,6 +258,7 @@ struct ocelot_stat_layout { struct ocelot_stats_region { struct list_head node; u32 base; + enum ocelot_stat first_stat; int count; u32 *buf; }; @@ -273,6 +274,7 @@ static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] OCELOT_STAT(RX_ASSEMBLY_OK), OCELOT_STAT(RX_MERGE_FRAGMENTS), OCELOT_STAT(TX_MERGE_FRAGMENTS), + OCELOT_STAT(TX_MM_HOLD), OCELOT_STAT(RX_PMAC_OCTETS), OCELOT_STAT(RX_PMAC_UNICAST), OCELOT_STAT(RX_PMAC_MULTICAST), @@ -341,11 +343,12 @@ static int ocelot_port_update_stats(struct ocelot *ocelot, int port) */ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port) { - unsigned int idx = port * OCELOT_NUM_STATS; struct ocelot_stats_region *region; int j; list_for_each_entry(region, &ocelot->stats_regions, node) { + unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat; + for (j = 0; j < region->count; j++) { u64 *stat = &ocelot->stats[idx + j]; u64 val = region->buf[j]; @@ -355,8 +358,6 @@ static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port) *stat = (*stat & ~(u64)U32_MAX) + val; } - - idx += region->count; } } @@ -899,7 +900,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot) if (!layout[i].reg) continue; - if (region && layout[i].reg == last + 4) { + if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] == + ocelot->map[SYS][last & REG_MASK] + 4) { region->count++; } else { region = devm_kzalloc(ocelot->dev, sizeof(*region), @@ -914,6 +916,7 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot) WARN_ON(last >= layout[i].reg); region->base = layout[i].reg; + region->first_stat = i; region->count = 1; list_add_tail(®ion->node, &ocelot->stats_regions); } diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index d17d1b4f2585..825356ee3492 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!*new_addr) { + if (dma_mapping_error(lp->device, *new_addr)) { dev_kfree_skb(*new_skb); *new_skb = NULL; return false; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d61cd32ec3b6..86a93cac2647 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, num_vports = p_hwfn->qm_info.num_vports; + if (num_vports < 2) { + DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); + return -EINVAL; + } + /* Accounting for the vports which are configured for WFQ explicitly */ for (i = 0; i < num_vports; i++) { u32 tmp_speed; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 6190adf965bc..f55eed092f25 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time 
*p_time, if (p_time->hour > 23) p_time->hour = 0; if (p_time->min > 59) - p_time->hour = 0; + p_time->min = 0; if (p_time->msec > 999) p_time->msec = 0; if (p_time->usec > 999) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 2bf18748581d..fa167b1aa019 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) } vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); + if (!vf) + return -EINVAL; + vport_id = vf->vport_id; return qed_configure_vport_wfq(cdev, vport_id, rate); @@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); - if (!vf->vport_instance) + if (!vf || !vf->vport_instance) continue; memset(¶ms, 0, sizeof(params)); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 3115b2c12898..eaa50050aa0b 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev) struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + unregister_netdev(netdev); netif_napi_del(&adpt->rx_q.napi); + free_irq(adpt->irq.irq, &adpt->irq); + cancel_work_sync(&adpt->work_thread); + emac_clks_teardown(adpt); put_device(&adpt->phydev->mdio.dev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0f54849a3823..894e2690c643 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); } - /* Indicate that the MAC is responsible for managing PHY PM */ - phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; @@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv) { struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; + struct phy_device *phydev; + struct device_node *pn; int error; /* Bitbang init */ @@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv) if (error) goto out_free_bus; + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + return 0; out_free_bus: diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 853394e5bb8b..c4f93d24c6a4 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -702,13 +702,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) u16 pkt_len; u32 get_ts; + if (*quota <= 0) + return true; + boguscnt = min_t(int, gq->ring_size, *quota); limit = boguscnt; desc = &gq->rx_ring[gq->cur]; while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { - if (--boguscnt < 0) - break; dma_rmb(); pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; skb = gq->skbs[gq->cur]; @@ -734,6 +735,9 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) gq->cur = rswitch_next_queue_index(gq, true, 1); desc = &gq->rx_ring[gq->cur]; + + if (--boguscnt <= 0) + break; } num 
= rswitch_get_num_cur_queues(gq); @@ -745,7 +749,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) goto err; gq->dirty = rswitch_next_queue_index(gq, false, num); - *quota -= limit - (++boguscnt); + *quota -= limit - boguscnt; return boguscnt <= 0; @@ -1437,7 +1441,10 @@ static int rswitch_open(struct net_device *ndev) rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); - iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + + bitmap_set(rdev->priv->opened_ports, rdev->port, 1); return 0; }; @@ -1448,8 +1455,10 @@ static int rswitch_stop(struct net_device *ndev) struct rswitch_gwca_ts_info *ts_info, *ts_info2; netif_tx_stop_all_queues(ndev); + bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); - iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { if (ts_info->port != rdev->port) diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 27d3d38c055f..b3e0411b408e 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -998,6 +998,7 @@ struct rswitch_private { struct rcar_gen4_ptp_private *ptp_priv; struct rswitch_device *rdev[RSWITCH_NUM_PORTS]; + DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS); struct rswitch_gwca gwca; struct rswitch_etha etha[RSWITCH_NUM_PORTS]; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ed17163d7811..d8ec729825be 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev) if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) phy_set_max_speed(phydev, SPEED_100); - /* Indicate that the MAC is responsible for managing PHY PM */ - phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; @@ -3097,6 +3095,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp, struct bb_info *bitbang; struct platform_device *pdev = mdp->pdev; struct device *dev = &mdp->pdev->dev; + struct phy_device *phydev; + struct device_node *pn; /* create bit control struct for PHY */ bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); @@ -3133,6 +3133,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp, if (ret) goto out_free_bus; + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + return 0; out_free_bus: diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 6b5d96bced47..ec9c130276d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -418,6 +418,7 @@ struct dma_features { unsigned int frpbs; unsigned int frpes; unsigned int addr64; + unsigned int host_dma_width; unsigned int rssen; unsigned int vlhash; unsigned int sphen; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index ac8580f501e2..2a2be65d65a0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -213,8 +213,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) struct device_node *np = dev->of_node; int err = 0; - if (of_get_property(np, "snps,rmii_refclk_ext", NULL)) - dwmac->rmii_refclk_ext = true; + dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext"); dwmac->clk_tx = devm_clk_get(dev, "tx"); if (IS_ERR(dwmac->clk_tx)) { @@ -289,7 +288,7 @@ static int imx_dwmac_probe(struct platform_device *pdev) goto err_parse_dt; } - plat_dat->addr64 = dwmac->ops->addr_width; + plat_dat->host_dma_width = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; plat_dat->clks_config = imx_dwmac_clks_config; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 7deb1f817dac..13aa919633b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -684,7 +684,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev, intel_priv->is_pse = true; plat->bus_id = 2; - plat->addr64 = 32; + plat->host_dma_width = 32; plat->clk_ptp_rate = 200000000; @@ -725,7 +725,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev, intel_priv->is_pse = true; plat->bus_id = 3; - plat->addr64 = 32; + plat->host_dma_width = 32; plat->clk_ptp_rate = 200000000; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 2f7d8e4561d9..9ae31e3dc821 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1; plat->riwt_off = 1; plat->maxmtu = ETH_DATA_LEN; - plat->addr64 = priv_plat->variant->dma_bit_mask; + plat->host_dma_width = priv_plat->variant->dma_bit_mask; plat->bsp_priv = priv_plat; plat->init = mediatek_dwmac_init; plat->clks_config = mediatek_dwmac_clks_config; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8f543c3ab5c5..17310ade88dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1431,7 +1431,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); - if (priv->dma_cap.addr64 <= 32) + if (priv->dma_cap.host_dma_width <= 32) gfp |= GFP_DMA32; if (!buf->page) { @@ -4587,7 +4587,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) unsigned int entry = rx_q->dirty_rx; gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); - if (priv->dma_cap.addr64 <= 32) + if (priv->dma_cap.host_dma_width <= 32) gfp |= GFP_DMA32; while (dirty-- > 0) { @@ -6205,7 +6205,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v) seq_printf(seq, "\tFlexible RX Parser: %s\n", priv->dma_cap.frpsel ? "Y" : "N"); seq_printf(seq, "\tEnhanced Addressing: %d\n", - priv->dma_cap.addr64); + priv->dma_cap.host_dma_width); seq_printf(seq, "\tReceive Side Scaling: %s\n", priv->dma_cap.rssen ? 
"Y" : "N"); seq_printf(seq, "\tVLAN Hash Filtering: %s\n", @@ -7178,20 +7178,22 @@ int stmmac_dvr_probe(struct device *device, dev_info(priv->device, "SPH feature enabled\n"); } - /* The current IP register MAC_HW_Feature1[ADDR64] only define - * 32/40/64 bit width, but some SOC support others like i.MX8MP - * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. - * So overwrite dma_cap.addr64 according to HW real design. + /* Ideally our host DMA address width is the same as for the + * device. However, it may differ and then we have to use our + * host DMA width for allocation and the device DMA width for + * register handling. */ - if (priv->plat->addr64) - priv->dma_cap.addr64 = priv->plat->addr64; + if (priv->plat->host_dma_width) + priv->dma_cap.host_dma_width = priv->plat->host_dma_width; + else + priv->dma_cap.host_dma_width = priv->dma_cap.addr64; - if (priv->dma_cap.addr64) { + if (priv->dma_cap.host_dma_width) { ret = dma_set_mask_and_coherent(device, - DMA_BIT_MASK(priv->dma_cap.addr64)); + DMA_BIT_MASK(priv->dma_cap.host_dma_width)); if (!ret) { - dev_info(priv->device, "Using %d bits DMA width\n", - priv->dma_cap.addr64); + dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", + priv->dma_cap.host_dma_width, priv->dma_cap.addr64); /* * If more than 32 bits can be addressed, make sure to @@ -7206,7 +7208,7 @@ int stmmac_dvr_probe(struct device *device, goto error_hw_init; } - priv->dma_cap.addr64 = 32; + priv->dma_cap.host_dma_width = 32; } } diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 8addee6d04bd..734a817d3c94 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e6144d963eaa..ab8b09a9ef61 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9271,7 +9271,7 @@ static int niu_get_of_props(struct niu *np) if (model) strcpy(np->vpd.model, model); - if (of_find_property(dp, "hot-swappable-phy", NULL)) { + if (of_property_read_bool(dp, "hot-swappable-phy")) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index fe86fbd58586..e220620d0ffc 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index 16ee9c29cb35..8caf85acbb6a 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -636,6 +636,10 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts, val = lower_32_bits(cycles); am65_cpts_write32(cpts, val, genf[req->index].length); + am65_cpts_write32(cpts, 0, genf[req->index].control); + am65_cpts_write32(cpts, 0, genf[req->index].ppm_hi); + am65_cpts_write32(cpts, 0, genf[req->index].ppm_low); + cpts->genf_enable |= BIT(req->index); } else { am65_cpts_write32(cpts, 0, genf[req->index].length); 
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index e8f38e3f7706..25e707d7b87c 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -226,8 +226,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev) if (IS_ERR(priv->gmii_sel)) return PTR_ERR(priv->gmii_sel); - if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL)) - priv->rmii_clock_external = true; + priv->rmii_clock_external = of_property_read_bool(pdev->dev.of_node, "rmii-clock-ext"); dev_set_drvdata(&pdev->dev, priv); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 751fb0bc65c5..2adf82a32bf6 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3583,13 +3583,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* init the hw stats lock */ spin_lock_init(&gbe_dev->hw_stats_lock); - if (of_find_property(node, "enable-ale", NULL)) { - gbe_dev->enable_ale = true; + gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale"); + if (gbe_dev->enable_ale) dev_info(dev, "ALE enabled\n"); - } else { - gbe_dev->enable_ale = false; + else dev_dbg(dev, "ALE bypass enabled*\n"); - } ret = of_property_read_u32(node, "tx-queue", &gbe_dev->tx_queue_id); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index cf8de8a7a8a1..9d535ae59626 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card, /* set up the hardware pointers in each descriptor */ for (i = 0; i < no; i++, descr++) { + dma_addr_t cpu_addr; + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); - descr->bus_addr = - dma_map_single(ctodev(card), descr, - GELIC_DESCR_SIZE, - DMA_BIDIRECTIONAL); - if (!descr->bus_addr) + cpu_addr = dma_map_single(ctodev(card), descr, + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(ctodev(card), cpu_addr)) goto iommu_error; + descr->bus_addr = cpu_to_be32(cpu_addr); descr->next = descr + 1; descr->prev = descr - 1; } @@ -365,26 +367,28 @@ iommu_error: * * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. * Activate the descriptor state-wise + * + * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length + * must be a multiple of GELIC_NET_RXBUF_ALIGN. 
*/ static int gelic_descr_prepare_rx(struct gelic_card *card, struct gelic_descr *descr) { + static const unsigned int rx_skb_size = + ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + + GELIC_NET_RXBUF_ALIGN - 1; + dma_addr_t cpu_addr; int offset; - unsigned int bufsize; if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) dev_info(ctodev(card), "%s: ERROR status\n", __func__); - /* we need to round up the buffer size to a multiple of 128 */ - bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); - /* and we need to have it 128 byte aligned, therefore we allocate a - * bit more */ - descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); if (!descr->skb) { descr->buf_addr = 0; /* tell DMAC don't touch memory */ return -ENOMEM; } - descr->buf_size = cpu_to_be32(bufsize); + descr->buf_size = cpu_to_be32(rx_skb_size); descr->dmac_cmd_status = 0; descr->result_size = 0; descr->valid_size = 0; @@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, if (offset) skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); /* io-mmu-map the skb */ - descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), - descr->skb->data, - GELIC_NET_MAX_MTU, - DMA_FROM_DEVICE)); - if (!descr->buf_addr) { + cpu_addr = dma_map_single(ctodev(card), descr->skb->data, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); + descr->buf_addr = cpu_to_be32(cpu_addr); + if (dma_mapping_error(ctodev(card), cpu_addr)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; dev_info(ctodev(card), @@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card, buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); - if (!buf) { + if (dma_mapping_error(ctodev(card), buf)) { dev_err(ctodev(card), "dma map 2 failed (%p, %i). Dropping packet\n", skb->data, skb->len); @@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr, data_error = be32_to_cpu(descr->data_error); /* unmap skb buffer */ dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), - GELIC_NET_MAX_MTU, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); skb_put(skb, be32_to_cpu(descr->valid_size)? 
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index 68f324ed4eaf..0d98defb011e 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -19,8 +19,9 @@ #define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */ #define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */ -#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN -#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN +#define GELIC_NET_MAX_FRAME 2312 +#define GELIC_NET_MAX_MTU 2294 +#define GELIC_NET_MIN_MTU 64 #define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index a502812ac418..86f7843b4591 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2709,8 +2709,7 @@ static int velocity_get_platform_info(struct velocity_info *vptr) struct resource res; int ret; - if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) - vptr->no_eeprom = 1; + vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom"); ret = of_address_to_resource(vptr->dev->of_node, 0, &res); if (ret) { diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index ffdac6fac054..f64ed39b93d8 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1383,7 +1383,7 @@ struct velocity_info { struct device *dev; struct pci_dev *pdev; struct net_device *netdev; - int no_eeprom; + bool no_eeprom; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 ip_addr[4]; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1066420d6a83..e0ac1bcd9925 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1455,12 +1455,11 @@ static int temac_probe(struct platform_device *pdev) * endianness mode. Default for OF devices is big-endian. 
*/ little_endian = false; - if (temac_np) { - if (of_get_property(temac_np, "little-endian", NULL)) - little_endian = true; - } else if (pdata) { + if (temac_np) + little_endian = of_property_read_bool(temac_np, "little-endian"); + else if (pdata) little_endian = pdata->reg_little_endian; - } + if (little_endian) { lp->temac_ior = _temac_ior_le; lp->temac_iow = _temac_iow_le; diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 894e92ef415b..9f505cf02d96 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -503,6 +503,11 @@ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + + netif_carrier_off(dev); + netif_tx_disable(dev); + cancel_work_sync(&local->tx_timeout_task); dev_dbg(&link->dev, "detach\n"); diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c index 1412b67304c8..1651fbad4bd5 100644 --- a/drivers/net/ipa/gsi_reg.c +++ b/drivers/net/ipa/gsi_reg.c @@ -15,6 +15,14 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) switch (reg_id) { case INTER_EE_SRC_CH_IRQ_MSK: case INTER_EE_SRC_EV_CH_IRQ_MSK: + return gsi->version >= IPA_VERSION_3_5; + + case HW_PARAM_2: + return gsi->version >= IPA_VERSION_3_5_1; + + case HW_PARAM_4: + return gsi->version >= IPA_VERSION_5_0; + case CH_C_CNTXT_0: case CH_C_CNTXT_1: case CH_C_CNTXT_2: @@ -43,7 +51,6 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) case CH_CMD: case EV_CH_CMD: case GENERIC_CMD: - case HW_PARAM_2: case CNTXT_TYPE_IRQ: case CNTXT_TYPE_IRQ_MSK: case CNTXT_SRC_CH_IRQ: diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index f62f0a5c653d..48fde65fa2e8 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -10,6 +10,10 @@ #include <linux/bits.h> +struct platform_device; + +struct gsi; + /** * DOC: GSI Registers * diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c index 735fa6591609..3f475428dddd 100644 --- a/drivers/net/ipa/ipa_reg.c +++ b/drivers/net/ipa/ipa_reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2023 Linaro Ltd. 
*/ #include <linux/io.h> @@ -15,6 +15,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) enum ipa_version version = ipa->version; switch (reg_id) { + case FILT_ROUT_HASH_EN: + return version == IPA_VERSION_4_2; + + case FILT_ROUT_HASH_FLUSH: + return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2; + + case FILT_ROUT_CACHE_FLUSH: + case ENDP_FILTER_CACHE_CFG: + case ENDP_ROUTER_CACHE_CFG: + return version >= IPA_VERSION_5_0; + case IPA_BCR: case COUNTER_CFG: return version < IPA_VERSION_4_5; @@ -32,14 +43,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case SRC_RSRC_GRP_45_RSRC_TYPE: case DST_RSRC_GRP_45_RSRC_TYPE: return version <= IPA_VERSION_3_1 || - version == IPA_VERSION_4_5; + version == IPA_VERSION_4_5 || + version == IPA_VERSION_5_0; case SRC_RSRC_GRP_67_RSRC_TYPE: case DST_RSRC_GRP_67_RSRC_TYPE: - return version <= IPA_VERSION_3_1; + return version <= IPA_VERSION_3_1 || + version == IPA_VERSION_5_0; case ENDP_FILTER_ROUTER_HSH_CFG: - return version != IPA_VERSION_4_2; + return version < IPA_VERSION_5_0 && + version != IPA_VERSION_4_2; case IRQ_SUSPEND_EN: case IRQ_SUSPEND_CLR: @@ -51,10 +65,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case SHARED_MEM_SIZE: case QSB_MAX_WRITES: case QSB_MAX_READS: - case FILT_ROUT_HASH_EN: - case FILT_ROUT_CACHE_CFG: - case FILT_ROUT_HASH_FLUSH: - case FILT_ROUT_CACHE_FLUSH: case STATE_AGGR_ACTIVE: case LOCAL_PKT_PROC_CNTXT: case AGGR_FORCE_CLOSE: @@ -76,8 +86,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case ENDP_INIT_RSRC_GRP: case ENDP_INIT_SEQ: case ENDP_STATUS: - case ENDP_FILTER_CACHE_CFG: - case ENDP_ROUTER_CACHE_CFG: case IPA_IRQ_STTS: case IPA_IRQ_EN: case IPA_IRQ_CLR: diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 28aa1351dd48..7dd65d39333d 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -60,9 +60,8 @@ enum ipa_reg_id { SHARED_MEM_SIZE, QSB_MAX_WRITES, QSB_MAX_READS, - FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */ - FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */ - FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */ + FILT_ROUT_HASH_EN, /* IPA v4.2 */ + FILT_ROUT_HASH_FLUSH, /* Not IPA v4.2 nor IPA v5.0+ */ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */ STATE_AGGR_ACTIVE, IPA_BCR, /* Not IPA v4.5+ */ @@ -77,12 +76,12 @@ enum ipa_reg_id { TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */ SRC_RSRC_GRP_01_RSRC_TYPE, SRC_RSRC_GRP_23_RSRC_TYPE, - SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */ - SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */ + SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */ + SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */ DST_RSRC_GRP_01_RSRC_TYPE, DST_RSRC_GRP_23_RSRC_TYPE, - DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */ - DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */ + DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */ + DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */ ENDP_INIT_CFG, ENDP_INIT_NAT, /* TX only */ @@ -206,14 +205,6 @@ enum ipa_reg_qsb_max_reads_field_id { GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */ }; -/* FILT_ROUT_CACHE_CFG register */ -enum ipa_reg_filt_rout_cache_cfg_field_id { - ROUTER_CACHE_EN, - FILTER_CACHE_EN, - LOW_PRI_HASH_HIT_DISABLE, - LRU_EVICTION_THRESHOLD, -}; - /* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */ enum ipa_reg_filt_rout_hash_field_id { IPV6_ROUTER_HASH, diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h 
index 57b457f39b6e..2ee07eebca67 100644 --- a/drivers/net/ipa/reg.h +++ b/drivers/net/ipa/reg.h @@ -6,7 +6,8 @@ #define _REG_H_ #include <linux/types.h> -#include <linux/bits.h> +#include <linux/log2.h> +#include <linux/bug.h> /** * struct reg - A register descriptor diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c index 648b51b88d4e..2900e5c3ff88 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.5.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c @@ -137,17 +137,17 @@ REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1, 0x0001004c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0, - 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08); + 0x00011000 + 0x4000 * GSI_EE_AP, 0x08); REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0, - 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08); + 0x00011100 + 0x4000 * GSI_EE_AP, 0x08); static const u32 reg_gsi_status_fmask[] = { [ENABLED] = BIT(0), /* Bits 1-31 reserved */ }; -REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP); +REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP); static const u32 reg_ch_cmd_fmask[] = { [CH_CHID] = GENMASK(7, 0), @@ -155,7 +155,7 @@ static const u32 reg_ch_cmd_fmask[] = { [CH_OPCODE] = GENMASK(31, 24), }; -REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP); static const u32 reg_ev_ch_cmd_fmask[] = { [EV_CHID] = GENMASK(7, 0), @@ -163,7 +163,7 @@ static const u32 reg_ev_ch_cmd_fmask[] = { [EV_OPCODE] = GENMASK(31, 24), }; -REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP); +REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP); static const u32 reg_generic_cmd_fmask[] = { [GENERIC_OPCODE] = GENMASK(4, 0), @@ -172,7 +172,7 @@ static const u32 reg_generic_cmd_fmask[] = { /* Bits 14-31 reserved */ }; -REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP); +REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP); static const u32 reg_hw_param_2_fmask[] = { [IRAM_SIZE] = GENMASK(2, 0), @@ -188,58 +188,58 @@ static const u32 reg_hw_param_2_fmask[] = { [GSI_USE_INTER_EE] = BIT(31), }; -REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP); +REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP); -REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP); +REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP); -REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP); +REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk, - 0x0001f098 + 0x4000 * GSI_EE_AP); + 0x00012098 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk, - 0x0001f09c + 0x4000 * GSI_EE_AP); + 0x0001209c + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr, - 0x0001f0a0 + 0x4000 * GSI_EE_AP); + 0x000120a0 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr, - 0x0001f0a4 + 0x4000 * GSI_EE_AP); + 0x000120a4 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * 
GSI_EE_AP); REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk, - 0x0001f0b8 + 0x4000 * GSI_EE_AP); + 0x000120b8 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr, - 0x0001f0c0 + 0x4000 * GSI_EE_AP); + 0x000120c0 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP); static const u32 reg_cntxt_intset_fmask[] = { [INTYPE] = BIT(0) /* Bits 1-31 reserved */ }; -REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP); -REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP); +REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP); -REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP); +REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP); static const u32 reg_cntxt_scratch_0_fmask[] = { [INTER_EE_RESULT] = GENMASK(2, 0), @@ -248,7 +248,7 @@ static const u32 reg_cntxt_scratch_0_fmask[] = { /* Bits 8-31 reserved */ }; -REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP); static const struct reg *reg_array[] = { [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk, diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c index 4bf45d264d6b..8b5d95425a76 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.9.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c @@ -27,7 +27,7 @@ static const u32 reg_ch_c_cntxt_0_fmask[] = { }; REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0, - 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ch_c_cntxt_1_fmask[] = { [CH_R_LENGTH] = GENMASK(19, 0), @@ -35,11 +35,11 @@ static const u32 reg_ch_c_cntxt_1_fmask[] = { }; REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1, - 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80); -REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80); -REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ch_c_qos_fmask[] = { [WRR_WEIGHT] = GENMASK(3, 0), @@ -53,7 +53,7 @@ static const u32 reg_ch_c_qos_fmask[] = { /* Bits 25-31 reserved */ }; -REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_error_log_fmask[] = { [ERR_ARG3] = GENMASK(3, 0), @@ -67,16 +67,16 
@@ static const u32 reg_error_log_fmask[] = { }; REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0, - 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1, - 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2, - 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3, - 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_0_fmask[] = { [EV_CHTYPE] = GENMASK(3, 0), @@ -89,23 +89,23 @@ static const u32 reg_ev_ch_e_cntxt_0_fmask[] = { }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0, - 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010000 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_1_fmask[] = { [R_LENGTH] = GENMASK(15, 0), }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1, - 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010004 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2, - 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010008 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3, - 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001000c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4, - 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010010 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_8_fmask[] = { [EV_MODT] = GENMASK(15, 0), @@ -114,28 +114,28 @@ static const u32 reg_ev_ch_e_cntxt_8_fmask[] = { }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8, - 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010020 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9, - 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010024 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10, - 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010028 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11, - 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001002c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12, - 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010030 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13, - 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010034 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0, - 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010048 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1, - 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001004c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0, 0x00011000 + 0x4000 * GSI_EE_AP, 0x08); diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index 943d26cbf39f..71712ea25403 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, goto out; skb->dev = addr->master->dev; + skb->skb_iif = skb->dev->ifindex; len = skb->len + ETH_HLEN; ipvlan_count_rx(addr->master, len, true, false); out: diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c index d77c987fda9c..4630dde01974 100644 --- a/drivers/net/mdio/acpi_mdio.c +++ b/drivers/net/mdio/acpi_mdio.c @@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>"); MODULE_LICENSE("GPL"); /** - * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL. 
+ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL. * @mdio: pointer to mii_bus structure * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent + * @owner: module owning this @mdio object. * an ACPI device object corresponding to the MDIO bus and its children are * expected to correspond to the PHY devices on that bus. * * This function registers the mii_bus structure and registers a phy_device * for each child node of @fwnode. */ -int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) +int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode, + struct module *owner) { struct fwnode_handle *child; u32 addr; @@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) /* Mask out all PHYs from auto probing. */ mdio->phy_mask = GENMASK(31, 0); - ret = mdiobus_register(mdio); + ret = __mdiobus_register(mdio, owner); if (ret) return ret; @@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode) } return 0; } -EXPORT_SYMBOL(acpi_mdiobus_register); +EXPORT_SYMBOL(__acpi_mdiobus_register); diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index 3847ee92c109..6067d96b2b7b 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -106,6 +106,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, if (i >= ARRAY_SIZE(nexus->buses)) break; } + fwnode_handle_put(fwn); return 0; err_release_regions: diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 510822d6d0d9..1e46e39f5f46 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child) EXPORT_SYMBOL(of_mdiobus_child_is_phy); /** - * of_mdiobus_register - Register mii_bus and create PHYs from the device tree + * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree * @mdio: pointer to mii_bus structure * @np: pointer to device_node of MDIO bus. + * @owner: module owning the @mdio object. * * This function registers the mii_bus structure and registers a phy_device * for each child node of @np. 
*/ -int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) +int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np, + struct module *owner) { struct device_node *child; bool scanphys = false; int addr, rc; if (!np) - return mdiobus_register(mdio); + return __mdiobus_register(mdio, owner); /* Do not continue if the node is disabled */ if (!of_device_is_available(np)) @@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ - rc = mdiobus_register(mdio); + rc = __mdiobus_register(mdio, owner); if (rc) return rc; @@ -236,7 +238,7 @@ unregister: mdiobus_unregister(mdio); return rc; } -EXPORT_SYMBOL(of_mdiobus_register); +EXPORT_SYMBOL(__of_mdiobus_register); /** * of_mdio_find_device - Given a device tree node, find the mdio_device diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index b560e99695df..69b829e6ab35 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register); #if IS_ENABLED(CONFIG_OF_MDIO) /** - * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() + * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() * @dev: Device to register mii_bus for * @mdio: MII bus structure to register * @np: Device node to parse + * @owner: Owning module */ -int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, - struct device_node *np) +int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np, struct module *owner) { struct mdiobus_devres *dr; int ret; @@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, if (!dr) return -ENOMEM; - ret = of_mdiobus_register(mdio, np); + ret = __of_mdiobus_register(mdio, np, owner); if (ret) { devres_free(dr); return ret; @@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, devres_add(dev, dr); return 0; } -EXPORT_SYMBOL(devm_of_mdiobus_register); +EXPORT_SYMBOL(__devm_of_mdiobus_register); #endif /* CONFIG_OF_MDIO */ MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 8a13b1ad9a33..62bf99e45af1 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -280,12 +280,9 @@ static int vsc85xx_wol_set(struct phy_device *phydev, u16 pwd[3] = {0, 0, 0}; struct ethtool_wolinfo *wol_conf = wol; - mutex_lock(&phydev->lock); rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc < 0) { - rc = phy_restore_page(phydev, rc, rc); - goto out_unlock; - } + if (rc < 0) + return phy_restore_page(phydev, rc, rc); if (wol->wolopts & WAKE_MAGIC) { /* Store the device address for the magic packet */ @@ -323,7 +320,7 @@ static int vsc85xx_wol_set(struct phy_device *phydev, rc = phy_restore_page(phydev, rc, rc > 0 ? 
0 : rc); if (rc < 0) - goto out_unlock; + return rc; if (wol->wolopts & WAKE_MAGIC) { /* Enable the WOL interrupt */ @@ -331,22 +328,19 @@ static int vsc85xx_wol_set(struct phy_device *phydev, reg_val |= MII_VSC85XX_INT_MASK_WOL; rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); if (rc) - goto out_unlock; + return rc; } else { /* Disable the WOL interrupt */ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val &= (~MII_VSC85XX_INT_MASK_WOL); rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); if (rc) - goto out_unlock; + return rc; } /* Clear WOL iterrupt status */ reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS); -out_unlock: - mutex_unlock(&phydev->lock); - - return rc; + return 0; } static void vsc85xx_wol_get(struct phy_device *phydev, @@ -358,10 +352,9 @@ static void vsc85xx_wol_get(struct phy_device *phydev, u16 pwd[3] = {0, 0, 0}; struct ethtool_wolinfo *wol_conf = wol; - mutex_lock(&phydev->lock); rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); if (rc < 0) - goto out_unlock; + goto out_restore_page; reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (reg_val & SECURE_ON_ENABLE) @@ -377,9 +370,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev, } } -out_unlock: +out_restore_page: phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); - mutex_unlock(&phydev->lock); } #if IS_ENABLED(CONFIG_OF_MDIO) diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 047c581457e3..5813b07242ce 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -79,7 +79,7 @@ #define SGMII_ABILITY BIT(0) #define VEND1_MII_BASIC_CONFIG 0xAFC6 -#define MII_BASIC_CONFIG_REV BIT(8) +#define MII_BASIC_CONFIG_REV BIT(4) #define MII_BASIC_CONFIG_SGMII 0x9 #define MII_BASIC_CONFIG_RGMII 0x7 #define MII_BASIC_CONFIG_RMII 0x5 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b33e55a7364e..99a07eb54c44 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st) return NULL; } +static void phy_process_state_change(struct phy_device *phydev, + enum phy_state old_state) +{ + if (old_state != phydev->state) { + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); + if (phydev->drv && phydev->drv->link_change_notify) + phydev->drv->link_change_notify(phydev); + } +} + static void phy_link_up(struct phy_device *phydev) { phydev->phy_link_change(phydev, true); @@ -1301,6 +1313,7 @@ EXPORT_SYMBOL(phy_free_interrupt); void phy_stop(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; + enum phy_state old_state; if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) { WARN(1, "called from state %s\n", @@ -1309,6 +1322,7 @@ void phy_stop(struct phy_device *phydev) } mutex_lock(&phydev->lock); + old_state = phydev->state; if (phydev->state == PHY_CABLETEST) { phy_abort_cable_test(phydev); @@ -1319,6 +1333,7 @@ void phy_stop(struct phy_device *phydev) sfp_upstream_stop(phydev->sfp_bus); phydev->state = PHY_HALTED; + phy_process_state_change(phydev, old_state); mutex_unlock(&phydev->lock); @@ -1436,13 +1451,7 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - if (old_state != phydev->state) { - phydev_dbg(phydev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), - phy_state_to_str(phydev->state)); - if (phydev->drv && phydev->drv->link_change_notify) - phydev->drv->link_change_notify(phydev); - } + 
phy_process_state_change(phydev, old_state); /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index c02cad6478a8..fb98db61e06c 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2190,6 +2190,11 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) break; } + /* Force a poll to re-read the hardware signal state after + * sfp_sm_mod_probe() changed state_hw_mask. + */ + mod_delayed_work(system_wq, &sfp->poll, 1); + err = sfp_hwmon_insert(sfp); if (err) dev_warn(sfp->dev, "hwmon probe failed: %pe\n", diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 00d9eff91dcf..df2c5435c5c4 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev) static int lan87xx_read_status(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; + int err; - int err = genphy_read_status(phydev); + err = genphy_read_status(phydev); + if (err) + return err; if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) { /* Disable EDPD to wake up PHY */ diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 743cbf5d662c..f7cff58fe044 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -666,8 +666,9 @@ static int asix_resume(struct usb_interface *intf) static int ax88772_init_mdio(struct usbnet *dev) { struct asix_common_private *priv = dev->driver_priv; + int ret; - priv->mdio = devm_mdiobus_alloc(&dev->udev->dev); + priv->mdio = mdiobus_alloc(); if (!priv->mdio) return -ENOMEM; @@ -679,7 +680,20 @@ static int ax88772_init_mdio(struct usbnet *dev) snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - return devm_mdiobus_register(&dev->udev->dev, priv->mdio); + ret = mdiobus_register(priv->mdio); + if (ret) { + netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret); + mdiobus_free(priv->mdio); + priv->mdio = NULL; + } + + return ret; +} + +static void ax88772_mdio_unregister(struct asix_common_private *priv) +{ + mdiobus_unregister(priv->mdio); + mdiobus_free(priv->mdio); } static int ax88772_init_phy(struct usbnet *dev) @@ -896,16 +910,23 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) ret = ax88772_init_mdio(dev); if (ret) - return ret; + goto mdio_err; ret = ax88772_phylink_setup(dev); if (ret) - return ret; + goto phylink_err; ret = ax88772_init_phy(dev); if (ret) - phylink_destroy(priv->phylink); + goto initphy_err; + return 0; + +initphy_err: + phylink_destroy(priv->phylink); +phylink_err: + ax88772_mdio_unregister(priv); +mdio_err: return ret; } @@ -926,6 +947,7 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) phylink_disconnect_phy(priv->phylink); rtnl_unlock(); phylink_destroy(priv->phylink); + ax88772_mdio_unregister(priv); asix_rx_fixup_common_free(dev->driver_priv); } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 068488890d57..c458c030fadf 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3579,13 +3579,29 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb, size = (rx_cmd_a & RX_CMD_A_LEN_MASK_); align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } 
+ if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x", rx_cmd_a); } else { - u32 frame_len = size - ETH_FCS_LEN; + u32 frame_len; struct sk_buff *skb2; + if (unlikely(size < ETH_FCS_LEN)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + + frame_len = size - ETH_FCS_LEN; + skb2 = napi_alloc_skb(&dev->napi, frame_len); if (!skb2) return 0; diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 7a2b0094de51..2894114858a2 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -62,12 +62,6 @@ pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) } static inline int -pl_clear_QuickLink_features(struct usbnet *dev, int val) -{ - return pl_vendor_req(dev, 1, (u8) val, 0); -} - -static inline int pl_set_QuickLink_features(struct usbnet *dev, int val) { return pl_vendor_req(dev, 3, (u8) val, 0); diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 95de452ff4da..5d6454fedb3f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 32d2c60d334d..563ecd27b93e 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1833,6 +1833,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (u16)((header & RX_STS_FL_) >> 16); align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err header=0x%08x\n", header); + return 0; + } + if (unlikely(header & RX_STS_ES_)) { netif_dbg(dev, rx_err, dev->net, "Error header=0x%08x\n", header); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 1bb54de7124d..c1178915496d 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -708,7 +708,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, u32 frame_sz; if (skb_shared(skb) || skb_head_is_locked(skb) || - skb_shinfo(skb)->nr_frags) { + skb_shinfo(skb)->nr_frags || + skb_headroom(skb) < XDP_PACKET_HEADROOM) { u32 size, len, max_head_size, off; struct sk_buff *nskb; struct page *page; @@ -773,9 +774,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, consume_skb(skb); skb = nskb; - } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM && - pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) { - goto drop; } /* SKB "head" area always have tailroom for skb_shared_info */ @@ -1257,6 +1255,26 @@ static int veth_enable_range_safe(struct net_device *dev, int start, int end) return 0; } +static void veth_set_xdp_features(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer; + + peer = rtnl_dereference(priv->peer); + if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { + xdp_features_t val = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG; + + if (priv->_xdp_prog || veth_gro_requested(dev)) + val |= NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG; + xdp_set_features_flag(dev, val); + } else { + 
xdp_clear_features_flag(dev); + } +} + static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch) { @@ -1323,6 +1341,12 @@ out: if (peer) netif_carrier_on(peer); } + + /* update XDP supported features */ + veth_set_xdp_features(dev); + if (peer) + veth_set_xdp_features(peer); + return err; revert: @@ -1489,7 +1513,10 @@ static int veth_set_features(struct net_device *dev, err = veth_napi_enable(dev); if (err) return err; + + xdp_features_set_redirect_target(dev, true); } else { + xdp_features_clear_redirect_target(dev); veth_napi_del(dev); } return 0; @@ -1570,10 +1597,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } + + xdp_features_set_redirect_target(dev, true); } if (old_prog) { if (!prog) { + if (!veth_gro_requested(dev)) + xdp_features_clear_redirect_target(dev); + if (dev->flags & IFF_UP) veth_disable_xdp(dev); @@ -1610,7 +1642,7 @@ static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp) struct veth_xdp_buff *_ctx = (void *)ctx; if (!_ctx->skb) - return -EOPNOTSUPP; + return -ENODATA; *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp; return 0; @@ -1621,7 +1653,7 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash) struct veth_xdp_buff *_ctx = (void *)ctx; if (!_ctx->skb) - return -EOPNOTSUPP; + return -ENODATA; *hash = skb_get_hash(_ctx->skb); return 0; @@ -1686,10 +1718,6 @@ static void veth_setup(struct net_device *dev) dev->hw_enc_features = VETH_FEATURES; dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; netif_set_tso_max_size(dev, GSO_MAX_SIZE); - - dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | - NETDEV_XDP_ACT_NDO_XMIT_SG; } /* @@ -1857,6 +1885,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, goto err_queues; veth_disable_gro(dev); + /* update XDP supported features */ + veth_set_xdp_features(dev); + veth_set_xdp_features(peer); + return 0; err_queues: diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fb5e68ed3ec2..2396c28c0122 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -446,7 +446,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, - unsigned int len, unsigned int truesize) + unsigned int len, unsigned int truesize, + unsigned int headroom) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -464,11 +465,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - buf = p; + buf = p - headroom; len -= hdr_len; offset += hdr_padded_len; p += hdr_padded_len; - tailroom = truesize - hdr_padded_len - len; + tailroom = truesize - headroom - hdr_padded_len - len; shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -545,6 +546,87 @@ ok: return skb; } +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) +{ + unsigned int len; + unsigned int packets = 0; + unsigned int bytes = 0; + void *ptr; + + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; + + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += xdp_get_frame_len(frame); + xdp_return_frame(frame); + } + 
packets++; + } + + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. + */ + if (!packets) + return; + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + u64_stats_update_end(&sq->stats.syncp); +} + +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + +static void check_sq_full_and_disable(struct virtnet_info *vi, + struct net_device *dev, + struct send_queue *sq) +{ + bool use_napi = sq->napi.weight; + int qnum; + + qnum = sq - vi->sq; + + /* If running out of space, stop queue to avoid getting packets that we + * are then unable to transmit. + * An alternative would be to force queuing layer to requeue the skb by + * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be + * returned in a normal path of operation: it means that driver is not + * maintaining the TX queue stop/start state properly, and causes + * the stack to do a non-trivial amount of useless work. + * Since most packets only take 1 or 2 ring slots, stopping the queue + * early means 16 slots are typically wasted. + */ + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { + netif_stop_subqueue(dev, qnum); + if (use_napi) { + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) + virtqueue_napi_schedule(&sq->napi, sq->vq); + } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + /* More just got used, free them then recheck. */ + free_old_xmit_skbs(sq, false); + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { + netif_start_subqueue(dev, qnum); + virtqueue_disable_cb(sq->vq); + } + } + } +} + static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) @@ -686,6 +768,9 @@ static int virtnet_xdp_xmit(struct net_device *dev, } ret = nxmit; + if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) + check_sq_full_and_disable(vi, dev, sq); + if (flags & XDP_XMIT_FLUSH) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) kicks = 1; @@ -925,7 +1010,7 @@ static struct sk_buff *receive_big(struct net_device *dev, { struct page *page = buf; struct sk_buff *skb = - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -1188,9 +1273,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, switch (act) { case XDP_PASS: + head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); + if (unlikely(!head_skb)) + goto err_xdp_frags; + if (unlikely(xdp_page != page)) put_page(page); - head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); rcu_read_unlock(); return head_skb; case XDP_TX: @@ -1248,7 +1336,7 @@ err_xdp_frags: rcu_read_unlock(); skip_xdp: - head_skb = page_to_skb(vi, rq, page, offset, len, truesize); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1714,52 +1802,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return stats.packets; } -static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) -{ - unsigned int len; - unsigned int packets = 0; - unsigned int bytes = 0; - void *ptr; - - while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { - if (likely(!is_xdp_frame(ptr))) { - struct sk_buff *skb = ptr; - - pr_debug("Sent skb %p\n", skb); - - bytes += skb->len; 
- napi_consume_skb(skb, in_napi); - } else { - struct xdp_frame *frame = ptr_to_xdp(ptr); - - bytes += xdp_get_frame_len(frame); - xdp_return_frame(frame); - } - packets++; - } - - /* Avoid overhead when no packets have been processed - * happens when called speculatively from start_xmit. - */ - if (!packets) - return; - - u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; - u64_stats_update_end(&sq->stats.syncp); -} - -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1989,30 +2031,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); } - /* If running out of space, stop queue to avoid getting packets that we - * are then unable to transmit. - * An alternative would be to force queuing layer to requeue the skb by - * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be - * returned in a normal path of operation: it means that driver is not - * maintaining the TX queue stop/start state properly, and causes - * the stack to do a non-trivial amount of useless work. - * Since most packets only take 1 or 2 ring slots, stopping the queue - * early means 16 slots are typically wasted. - */ - if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { - netif_stop_subqueue(dev, qnum); - if (use_napi) { - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) - virtqueue_napi_schedule(&sq->napi, sq->vq); - } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { - /* More just got used, free them then recheck. 
*/ - free_old_xmit_skbs(sq, false); - if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { - netif_start_subqueue(dev, qnum); - virtqueue_disable_cb(sq->vq); - } - } - } + check_sq_full_and_disable(vi, dev, sq); if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 1c53b5546927..47c2ad7a3e42 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1177,14 +1177,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) uhdlc_priv->dev = &pdev->dev; uhdlc_priv->ut_info = ut_info; - if (of_get_property(np, "fsl,tdm-interface", NULL)) - uhdlc_priv->tsa = 1; - - if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) - uhdlc_priv->loopback = 1; - - if (of_get_property(np, "fsl,hdlc-bus", NULL)) - uhdlc_priv->hdlc_bus = 1; + uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface"); + uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback"); + uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus"); if (uhdlc_priv->tsa == 1) { utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 565522466eba..b55b1b17f4d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) rcu_read_lock(); do { - while (likely(!mvmtxq->stopped && + while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL, + &mvmtxq->state) && + !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, + &mvmtxq->state) && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); @@ -757,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* - * Please note that racing is handled very carefully here: - * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is - * deleted afterwards. - * This means that if: - * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): - * queue is allocated and we can TX. - * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): - * a race, should defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): - * need to allocate the queue and defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): - * queue is already scheduled for allocation, no need to allocate, - * should defer the frame. - */ - - /* If the queue is allocated TX and return. */ - if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { - /* - * Check that list is empty to avoid a race where txq_id is - * already updated, but the queue allocation work wasn't - * finished - */ - if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) - return; - + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || + !txq->sta) { iwl_mvm_mac_itxq_xmit(hw, txq); return; } - /* The list is being deleted only after the queue is fully allocated. 
*/ - if (!list_empty(&mvmtxq->list)) - return; + /* iwl_mvm_mac_itxq_xmit() will later be called by the worker + * to handle any packets we leave on the txq now + */ - list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); - schedule_work(&mvm->add_stream_wk); + spin_lock_bh(&mvm->add_stream_lock); + /* The list is being deleted only after the queue is fully allocated. */ + if (list_empty(&mvmtxq->list) && + /* recheck under lock */ + !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) { + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); + } + spin_unlock_bh(&mvm->add_stream_lock); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 90bc95d96a78..f307c345dfa0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -729,7 +729,10 @@ struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; - bool stopped; +#define IWL_MVM_TXQ_STATE_STOP_FULL 0 +#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 +#define IWL_MVM_TXQ_STATE_READY 2 + unsigned long state; }; static inline struct iwl_mvm_txq * @@ -827,6 +830,7 @@ struct iwl_mvm { struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ + spinlock_t add_stream_lock; const char *nvm_file_name; struct iwl_nvm_data *nvm_data; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f4e9446d9dc2..9711841bb456 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1195,6 +1195,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); + spin_lock_init(&mvm->add_stream_lock); init_waitqueue_head(&mvm->rx_sync_waitq); @@ -1691,7 +1692,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, txq = sta->txq[tid]; mvmtxq = iwl_mvm_txq_from_mac80211(txq); - mvmtxq->stopped = !start; + if (start) + clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); + else + set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) iwl_mvm_mac_itxq_xmit(mvm->hw, txq); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 69634fb82a9b..9caae77995ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); - mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_lock_bh(&mvm->add_stream_lock); list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ @@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; - mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_lock_bh(&mvm->add_stream_lock); list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, 
&mvmtxq->state); + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ - txq->stopped = true; + set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { @@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, out: /* Continue using the queue */ - txq->stopped = false; + clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); return ret; } @@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) * a queue in the function itself. */ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { + spin_lock_bh(&mvm->add_stream_lock); list_del_init(&mvmtxq->list); + spin_unlock_bh(&mvm->add_stream_lock); continue; } - list_del_init(&mvmtxq->list); + /* now we're ready, any remaining races/concurrency will be + * handled in iwl_mvm_mac_itxq_xmit() + */ + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + local_bh_disable(); + spin_lock(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + spin_unlock(&mvm->add_stream_lock); + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } @@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); + spin_lock_bh(&mvm->add_stream_lock); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + spin_unlock_bh(&mvm->add_stream_lock); } } diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 5dcf61761a16..9a698a16a8f3 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = { .can_ext_scan = true, }; -static const struct of_device_id mwifiex_pcie_of_match_table[] = { +static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = { { .compatible = "pci11ab,2b42" }, { .compatible = "pci1b4b,2b42" }, { } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index c64e24c10ea6..a24bd40dd41a 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -495,7 +495,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { {"EXTLAST", NULL, 0, 0xFE}, }; -static const struct of_device_id mwifiex_sdio_of_match_table[] = { +static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = { { .compatible = "marvell,sd8787" }, { .compatible = "marvell,sd8897" }, { .compatible = "marvell,sd8978" }, diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index b117e4467c87..34abf70f44af 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -539,6 +539,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht, if (ret) return ret; + set_bit(MT76_STATE_REGISTERED, &phy->state); phy->dev->phys[phy->band_idx] = phy; return 0; @@ -549,6 +550,9 @@ void mt76_unregister_phy(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; + if (!test_bit(MT76_STATE_REGISTERED, &phy->state)) + 
return; + if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(phy); mt76_tx_status_check(dev, true); @@ -719,6 +723,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, return ret; WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx")); + set_bit(MT76_STATE_REGISTERED, &phy->state); sched_set_fifo_low(dev->tx_worker.task); return 0; @@ -729,6 +734,9 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; + if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state)) + return; + if (IS_ENABLED(CONFIG_MT76_LEDS)) mt76_led_cleanup(&dev->phy); mt76_tx_status_check(dev, true); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index ccca0162c8f8..183b0fc5a2d4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -402,6 +402,7 @@ struct mt76_tx_cb { enum { MT76_STATE_INITIALIZED, + MT76_STATE_REGISTERED, MT76_STATE_RUNNING, MT76_STATE_MCU_RUNNING, MT76_SCANNING, diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index efb9bfaa187f..008ece1b16f8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv); int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb) { + if (!mt76_is_mmio(dev)) + return 0; + if (!mtk_wed_device_active(&dev->mmio.wed)) return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 1ab768feccaa..5e288116b1b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -383,7 +383,6 @@ mt7915_init_wiphy(struct mt7915_phy *phy) ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, WANT_MONITOR_VIF); - ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); hw->max_tx_fragments = 4; @@ -396,6 +395,9 @@ mt7915_init_wiphy(struct mt7915_phy *phy) } if (phy->mt76->cap.has_5ghz) { + struct ieee80211_sta_vht_cap *vht_cap; + + vht_cap = &phy->mt76->sband_5g.sband.vht_cap; phy->mt76->sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_MAX_AMSDU; @@ -403,19 +405,28 @@ mt7915_init_wiphy(struct mt7915_phy *phy) IEEE80211_HT_MPDU_DENSITY_4; if (is_mt7915(&dev->mt76)) { - phy->mt76->sband_5g.sband.vht_cap.cap |= + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; + + if (!dev->dbdc_support) + vht_cap->cap |= + IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | + FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1); } else { - phy->mt76->sband_5g.sband.vht_cap.cap |= + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; /* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */ - phy->mt76->sband_5g.sband.vht_cap.cap |= + vht_cap->cap |= IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; } + + if (!is_mt7915(&dev->mt76) || !dev->dbdc_support) + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } mt76_set_stream_caps(phy->mt76, true); @@ -841,9 +852,13 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy, int sts = hweight8(phy->mt76->chainmask); u8 c, sts_160 = sts; - /* mt7915 doesn't support bw160 */ - if (is_mt7915(&dev->mt76)) - sts_160 = 0; + /* Can do 1/2 of STS in 160Mhz 
mode for mt7915 */ + if (is_mt7915(&dev->mt76)) { + if (!dev->dbdc_support) + sts_160 /= 2; + else + sts_160 = 0; + } #ifdef CONFIG_MAC80211_MESH if (vif == NL80211_IFTYPE_MESH_POINT) @@ -944,10 +959,15 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask); u16 mcs_map = 0; u16 mcs_map_160 = 0; - u8 nss_160 = nss; + u8 nss_160; - /* Can't do 160MHz with mt7915 */ - if (is_mt7915(&dev->mt76)) + if (!is_mt7915(&dev->mt76)) + nss_160 = nss; + else if (!dev->dbdc_support) + /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */ + nss_160 = nss / 2; + else + /* Can't do 160MHz with mt7915 dbdc */ nss_160 = 0; for (i = 0; i < 8; i++) { diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 2d2edddc77bd..3f88e6a0a510 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -447,8 +447,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, dev_info(&spi->dev, "selected chip family is %s\n", pdev_data->family->name); - if (of_find_property(dt_node, "clock-xtal", NULL)) - pdev_data->ref_clock_xtal = true; + pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal"); /* optional clock frequency params */ of_property_read_u32(dt_node, "ref-clock-frequency", diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index ed9c5e2cf3ad..a187f0e0b0f7 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); + arg.phy = phy; init_completion(&arg.done); cntx = phy->out_urb->context; phy->out_urb->context = &arg; diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index 755460a73c0d..d2aa9f766738 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe); void ndlc_remove(struct llt_ndlc *ndlc) { - st_nci_remove(ndlc->ndev); - /* cancel timers */ del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = false; + /* cancel work */ + cancel_work_sync(&ndlc->sm_work); + + st_nci_remove(ndlc->ndev); skb_queue_purge(&ndlc->rcv_q); skb_queue_purge(&ndlc->send_q); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2730b116dc6..53ef028596c6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, range = page_address(ns->ctrl->discard_page); } - __rq_for_each_bio(bio, req) { - u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); - u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; - - if (n < segments) { - range[n].cattr = cpu_to_le32(0); - range[n].nlb = cpu_to_le32(nlb); - range[n].slba = cpu_to_le64(slba); + if (queue_max_discard_segments(req->q) == 1) { + u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); + u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); + + range[0].cattr = cpu_to_le32(0); + range[0].nlb = cpu_to_le32(nlb); + range[0].slba = cpu_to_le64(slba); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + + if (n < segments) { + range[n].cattr = cpu_to_le32(0); + range[n].nlb = cpu_to_le32(nlb); + range[n].slba = cpu_to_le64(slba); + } + n++; } - n++; } if 
(WARN_ON_ONCE(n != segments)) { @@ -3053,7 +3063,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) else ctrl->max_zeroes_sectors = 0; - if (nvme_ctrl_limited_cns(ctrl)) + if (ctrl->subsys->subtype != NVME_NQN_NVME || + nvme_ctrl_limited_cns(ctrl)) return 0; id = kzalloc(sizeof(*id), GFP_KERNEL); diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 723e7d5b778f..d24ea2e05156 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -464,7 +464,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; } -static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd) +static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd, + unsigned issue_flags) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); struct request *req = pdu->req; @@ -485,17 +486,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd) blk_rq_unmap_user(req->bio); blk_mq_free_request(req); - io_uring_cmd_done(ioucmd, status, result); + io_uring_cmd_done(ioucmd, status, result, issue_flags); } -static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd) +static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, + unsigned issue_flags) { struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) blk_rq_unmap_user(pdu->bio); - io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result); + io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags); } static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, @@ -517,7 +519,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, * Otherwise, move the completion to task work. */ if (cookie != NULL && blk_rq_is_poll(req)) - nvme_uring_task_cb(ioucmd); + nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); else io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb); @@ -539,7 +541,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req, * Otherwise, move the completion to task work. 
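Illustrative aside, not part of the patch text above: the nvme_setup_discard() hunk earlier in this diff adds a fast path in which, when the queue advertises only one discard segment, the whole merged request is described by a single range, converting the start and length from 512-byte sectors into namespace LBAs. A minimal userspace sketch of that conversion; the struct and values are hypothetical stand-ins for blk_rq_pos(), blk_rq_sectors() and ns->lba_shift:

#include <stdint.h>
#include <stdio.h>

struct fake_ns { unsigned int lba_shift; };   /* e.g. 12 for a 4096-byte LBA format */

static uint64_t sect_to_lba(const struct fake_ns *ns, uint64_t sector)
{
	/* sectors are 512 bytes, so shift by (lba_shift - 9) */
	return sector >> (ns->lba_shift - 9);
}

int main(void)
{
	struct fake_ns ns = { .lba_shift = 12 };
	uint64_t rq_pos = 2048;      /* request start, in 512-byte sectors */
	uint32_t rq_sectors = 1024;  /* request length, in 512-byte sectors */
	uint64_t slba = sect_to_lba(&ns, rq_pos);
	uint32_t nlb = rq_sectors >> (ns.lba_shift - 9);

	/* 2048 / 8 = LBA 256, 1024 / 8 = 128 LBAs */
	printf("slba=%llu nlb=%u\n", (unsigned long long)slba, nlb);
	return 0;
}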
*/ if (cookie != NULL && blk_rq_is_poll(req)) - nvme_uring_task_meta_cb(ioucmd); + nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED); else io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index fc39d01e7b63..9171452e2f6d 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; - nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, - blk_rq_bytes(rq) >> SECTOR_SHIFT, - req_op(rq), jiffies); + nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq), + jiffies); } EXPORT_SYMBOL_GPL(nvme_mpath_start_request); @@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq) if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; bdev_end_io_acct(ns->head->disk->part0, req_op(rq), - nvme_req(rq)->start_time); + blk_rq_bytes(rq) >> SECTOR_SHIFT, + nvme_req(rq)->start_time); } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5b95c94ee40f..b615906263f3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3073,6 +3073,7 @@ out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: nvme_uninit_ctrl(&dev->ctrl); + nvme_put_ctrl(&dev->ctrl); return result; } @@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ @@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7723a4989524..42c0598c31f2 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) return queue->data_digest ? 
NVME_TCP_DIGEST_LENGTH : 0; } +static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req) +{ + return req->pdu; +} + +static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req) +{ + /* use the pdu space in the back for the data pdu */ + return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - + sizeof(struct nvme_tcp_data_pdu); +} + static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req) { if (nvme_is_fabrics(req->req.cmd)) @@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { - struct nvme_tcp_data_pdu *data = req->pdu; + struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req); struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); u32 h2cdata_sent = req->pdu_len; @@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); bool inline_data = nvme_tcp_has_inline_data(req); u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) + hdgst - req->offset; @@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; - struct nvme_tcp_data_pdu *pdu = req->pdu; + struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req); u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) - req->offset + hdgst; int ret; @@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; int qid = nvme_tcp_queue_id(req->queue); @@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_command *c = &pdu->cmd; c->common.flags |= NVME_CMD_SGL_METABUF; @@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_tcp_queue *queue = req->queue; u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; blk_status_t ret; @@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = { static int __init nvme_tcp_init_module(void) { + BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); + BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); + BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24); + nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!nvme_tcp_wq) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c 
index f66ed13d7c11..3935165048e7 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) void nvmet_req_complete(struct nvmet_req *req, u16 status) { + struct nvmet_sq *sq = req->sq; + __nvmet_req_complete(req, status); - percpu_ref_put(&req->sq->ref); + percpu_ref_put(&sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_complete); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 174ef3574e07..22024b830788 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1231,7 +1231,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) "#nvmem-cell-cells", index, &cell_spec); if (ret) - return ERR_PTR(ret); + return ERR_PTR(-ENOENT); if (cell_spec.args_count > 1) return ERR_PTR(-EINVAL); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 83ae838ceb5f..549c4bd5caec 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) } EXPORT_SYMBOL_GPL(pci_bus_resource_n); +void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res) +{ + struct pci_bus_resource *bus_res, *tmp; + int i; + + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { + if (bus->resource[i] == res) { + bus->resource[i] = NULL; + return; + } + } + + list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { + if (bus_res->res == res) { + list_del(&bus_res->list); + kfree(bus_res); + return; + } + } +} + void pci_bus_remove_resources(struct pci_bus *bus) { int i; diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c index 0de7c255254e..d6de5a294128 100644 --- a/drivers/platform/chrome/cros_ec_chardev.c +++ b/drivers/platform/chrome/cros_ec_chardev.c @@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) u_cmd.insize > EC_MAX_MSG_BYTES) return -EINVAL; - s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), + s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize), GFP_KERNEL); if (!s_cmd) return -ENOMEM; diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 8e6f8a655079..05f413178462 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev) for (i = 0; i < AXP288_FG_INTR_NUM; i++) { pirq = platform_get_irq(pdev, i); + if (pirq < 0) + continue; ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq); if (ret < 0) return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq); diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index be34b9848450..de67b985f0a9 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -1906,6 +1906,7 @@ static void bq24190_remove(struct i2c_client *client) struct bq24190_dev_info *bdi = i2c_get_clientdata(client); int error; + cancel_delayed_work_sync(&bdi->input_current_limit_work); error = pm_runtime_resume_and_get(bdi->dev); if (error < 0) dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index cadb6a0c2cc7..b6c96376776a 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct 
port_data *port) port->psy_current_max = 0; break; default: - dev_err(dev, "Port %d: default case!\n", port->port_number); + dev_dbg(dev, "Port %d: default case!\n", port->port_number); port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP; } diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c index 14da5c595dd9..a87aeaea38e1 100644 --- a/drivers/power/supply/da9150-charger.c +++ b/drivers/power/supply/da9150-charger.c @@ -657,6 +657,7 @@ static int da9150_charger_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(charger->usb_phy)) usb_unregister_notifier(charger->usb_phy, &charger->otg_nb); + cancel_work_sync(&charger->otg_work); power_supply_unregister(charger->battery); power_supply_unregister(charger->usb); diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c index 4f9c1c417916..36f807b5ec44 100644 --- a/drivers/power/supply/rk817_charger.c +++ b/drivers/power/supply/rk817_charger.c @@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger, regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3, bulk_reg, 4); tmp = get_unaligned_be32(bulk_reg); - if (tmp < 0) - tmp = 0; boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000; /* @@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger, regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3, bulk_reg, 4); tmp = get_unaligned_be32(bulk_reg); - if (tmp < 0) - tmp = 0; boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000; regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H, bulk_reg, 2); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 362fa631f39b..a226dc1b65d7 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1145,10 +1145,12 @@ static int alua_activate(struct scsi_device *sdev, rcu_read_unlock(); mutex_unlock(&h->init_mutex); - if (alua_rtpg_queue(pg, sdev, qdata, true)) + if (alua_rtpg_queue(pg, sdev, qdata, true)) { fn = NULL; - else + } else { + kfree(qdata); err = SCSI_DH_DEV_OFFLINED; + } kref_put(&pg->kref, release_port_group); out: if (fn) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f7f62e56afca..9b6fbbe15d92 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; - /* In case scsi_remove_host() has not been called. */ - scsi_proc_hostdir_rm(shost->hostt); - /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) 
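Illustrative aside, not part of the patch text above: several of the surrounding hunks (st-nci ndlc, bq24190, da9150) apply the same rule in their remove paths: cancel queued work before freeing or unregistering the objects that the work handler dereferences. A hedged kernel-style sketch of that ordering, using a hypothetical "foo" driver rather than any of the drivers in this diff:

#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/workqueue.h>

struct foo_charger {
	struct work_struct otg_work;   /* the handler dereferences psy below */
	struct power_supply *psy;
};

static void foo_remove(struct platform_device *pdev)
{
	struct foo_charger *chg = platform_get_drvdata(pdev);

	/* first make sure no instance of the work is queued or running ... */
	cancel_work_sync(&chg->otg_work);
	/* ... only then tear down what the handler uses */
	power_supply_unregister(chg->psy);
}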
*/ rcu_barrier(); diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 40f238fa80cc..364fb1b5e45a 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -1393,4 +1393,6 @@ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc); void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc); void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc); int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc); +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *sas_expander); #endif /*MPI3MR_H_INCLUDED*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 29acf6111db3..a565817aa56d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -3837,29 +3837,34 @@ retry_init: mpi3mr_print_ioc_info(mrioc); - dprint_init(mrioc, "allocating config page buffers\n"); - mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, - MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL); if (!mrioc->cfg_page) { - retval = -1; - goto out_failed_noretry; + dprint_init(mrioc, "allocating config page buffers\n"); + mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; + mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL); + if (!mrioc->cfg_page) { + retval = -1; + goto out_failed_noretry; + } } - mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; - - retval = mpi3mr_alloc_reply_sense_bufs(mrioc); - if (retval) { - ioc_err(mrioc, - "%s :Failed to allocated reply sense buffers %d\n", - __func__, retval); - goto out_failed_noretry; + if (!mrioc->init_cmds.reply) { + retval = mpi3mr_alloc_reply_sense_bufs(mrioc); + if (retval) { + ioc_err(mrioc, + "%s :Failed to allocated reply sense buffers %d\n", + __func__, retval); + goto out_failed_noretry; + } } - retval = mpi3mr_alloc_chain_bufs(mrioc); - if (retval) { - ioc_err(mrioc, "Failed to allocated chain buffers %d\n", - retval); - goto out_failed_noretry; + if (!mrioc->chain_sgl_list) { + retval = mpi3mr_alloc_chain_bufs(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to allocated chain buffers %d\n", + retval); + goto out_failed_noretry; + } } retval = mpi3mr_issue_iocinit(mrioc); @@ -4382,13 +4387,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) mrioc->admin_req_base, mrioc->admin_req_dma); mrioc->admin_req_base = NULL; } - + if (mrioc->cfg_page) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, + mrioc->cfg_page, mrioc->cfg_page_dma); + mrioc->cfg_page = NULL; + } if (mrioc->pel_seqnum_virt) { dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); mrioc->pel_seqnum_virt = NULL; } + kfree(mrioc->throttle_groups); + mrioc->throttle_groups = NULL; + kfree(mrioc->logdata_buf); mrioc->logdata_buf = NULL; diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index a794cc8a1c0b..6d55698ea4d1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -5078,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev) struct workqueue_struct *wq; unsigned long flags; struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_hba_port *port, *hba_port_next; + struct mpi3mr_sas_node *sas_expander, *sas_expander_next; if (!shost) return; @@ -5117,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev) mpi3mr_free_mem(mrioc); mpi3mr_cleanup_resources(mrioc); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + 
list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, + &mrioc->sas_expander_list, list) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + mpi3mr_expander_node_remove(mrioc, sas_expander); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + } + list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { + ioc_info(mrioc, + "removing hba_port entry: %p port: %d from hba_port list\n", + port, port->port_id); + list_del(&port->list); + kfree(port); + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->sas_hba.num_phys) { + kfree(mrioc->sas_hba.phy); + mrioc->sas_hba.phy = NULL; + mrioc->sas_hba.num_phys = 0; + } + spin_lock(&mrioc_list_lock); list_del(&mrioc->list); spin_unlock(&mrioc_list_lock); diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index be25f242fa79..5748bd9369ff 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -9,9 +9,6 @@ #include "mpi3mr.h" -static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, - struct mpi3mr_sas_node *sas_expander); - /** * mpi3mr_post_transport_req - Issue transport requests and wait * @mrioc: Adapter instance reference @@ -2164,7 +2161,7 @@ out_fail: * * Return nothing. */ -static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_node *sas_expander) { struct mpi3mr_sas_port *mr_sas_port, *next; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e5ecd6ada6cd..e8a4750f6ec4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, goto out_fail; } port = sas_port_alloc_num(sas_node->parent_dev); - if ((sas_port_add(port))) { + if (!port || (sas_port_add(port))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); goto out_fail; @@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, mpt3sas_port->remote_identify.sas_address; } + if (!rphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_delete_port; + } + rphy->identify = mpt3sas_port->remote_identify; if ((sas_rphy_add(rphy))) { @@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, __FILE__, __LINE__, __func__); sas_rphy_free(rphy); rphy = NULL; + goto out_delete_port; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { @@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, rphy_to_expander_device(rphy), hba_port->port_id); return mpt3sas_port; - out_fail: +out_delete_port: + sas_port_delete(port); + +out_fail: list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, port_siblings) list_del(&mpt3sas_phy->port_siblings); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 030625ebb4e6..71feda2cdb63 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1900,6 +1900,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, } req->outstanding_cmds[index] = NULL; + + qla_put_fw_resources(sp->qpair, &sp->iores); return sp; } @@ -3112,7 +3114,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, } bsg_reply->reply_payload_rcv_len = 0; - qla_put_fw_resources(sp->qpair, &sp->iores); done: /* Return 
the vendor specific reply to API */ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 80c4ee9df2a4..bee1b8a82020 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1865,6 +1865,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { + /* + * perform lockless completion during driver unload + */ + if (qla2x00_chip_is_down(vha)) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + sp->done(sp, res); + spin_lock_irqsave(qp->qp_lock_ptr, flags); + continue; + } + switch (sp->cmd_type) { case TYPE_SRB: qla2x00_abort_srb(qp, sp, res, &flags); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 7d2210a006f0..5cce1ba70fc6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4); int result; + if (sdev->no_vpd_size) + return SCSI_DEFAULT_VPD_LEN; + /* * Fetch the VPD page header to find out how big the page * is. This is done to prevent problems on legacy devices diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c7080454aea9..3fcaf10a9dfe 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -134,7 +134,7 @@ static struct { {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, - {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES}, + {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE}, {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, @@ -188,6 +188,7 @@ static struct { {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE}, {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, @@ -233,6 +234,7 @@ static struct { {"SGI", "RAID5", "*", BLIST_SPARSELUN}, {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, + {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES}, {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, {"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 4e842d79de31..d217be323cc6 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1057,6 +1057,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, else if (*bflags & BLIST_SKIP_VPD_PAGES) sdev->skip_vpd_pages = 1; + if (*bflags & BLIST_NO_VPD_SIZE) + sdev->no_vpd_size = 1; + transport_configure_device(&sdev->sdev_gendev); if (sdev->host->hostt->slave_configure) { diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 23ce2f78c4ed..26efe12012a0 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -191,9 +191,9 @@ static const struct llcc_slice_config sc8280xp_data[] = { { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, { LLCC_APTCM, 30, 1024, 3, 1, 
0x0, 0x1, 1, 0, 0, 1, 0, 0 }, { LLCC_WRCACHE, 31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, - { LLCC_CVPFW, 32, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, - { LLCC_CPUSS1, 33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, - { LLCC_CPUHWT, 36, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CPUSS1, 3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 }, + { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, }; static const struct llcc_slice_config sdm845_data[] = { diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 2d3ee22b9249..538fa182169a 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -176,7 +176,8 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) struct reserved_mem *rmem; struct qcom_rmtfs_mem *rmtfs_mem; u32 client_id; - u32 num_vmids, vmid[NUM_MAX_VMIDS]; + u32 vmid[NUM_MAX_VMIDS]; + int num_vmids; int ret, i; rmem = of_reserved_mem_lookup(node); @@ -228,8 +229,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) } num_vmids = of_property_count_u32_elems(node, "qcom,vmid"); - if (num_vmids < 0) { - dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", ret); + if (num_vmids == -EINVAL) { + /* qcom,vmid is optional */ + num_vmids = 0; + } else if (num_vmids < 0) { + dev_err(&pdev->dev, "failed to count qcom,vmid elements: %d\n", num_vmids); goto remove_cdev; } else if (num_vmids > NUM_MAX_VMIDS) { dev_warn(&pdev->dev, diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index 297dc62bca29..372d64756ed6 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx, goto out; } + /* Open session with loaded TA */ + handle_open_session(arg, &session_info, param); + if (arg->ret != TEEC_SUCCESS) { + pr_err("open_session failed %d\n", arg->ret); + handle_unload_ta(ta_handle); + kref_put(&sess->refcount, destroy_session); + goto out; + } + /* Find an empty session index for the given TA */ spin_lock(&sess->lock); i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS); - if (i < TEE_NUM_SESSIONS) + if (i < TEE_NUM_SESSIONS) { + sess->session_info[i] = session_info; + set_session_id(ta_handle, i, &arg->session); set_bit(i, sess->sess_mask); + } spin_unlock(&sess->lock); if (i >= TEE_NUM_SESSIONS) { pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS); + handle_close_session(ta_handle, session_info); handle_unload_ta(ta_handle); kref_put(&sess->refcount, destroy_session); rc = -ENOMEM; goto out; } - /* Open session with loaded TA */ - handle_open_session(arg, &session_info, param); - if (arg->ret != TEEC_SUCCESS) { - pr_err("open_session failed %d\n", arg->ret); - spin_lock(&sess->lock); - clear_bit(i, sess->sess_mask); - spin_unlock(&sess->lock); - handle_unload_ta(ta_handle); - kref_put(&sess->refcount, destroy_session); - goto out; - } - - sess->session_info[i] = session_info; - set_session_id(ta_handle, i, &arg->session); out: free_pages((u64)ta, get_order(ta_size)); return rc; diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 55679fd86505..566df4522b88 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -613,6 +613,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, struct thermal_instance *pos; struct thermal_zone_device *pos1; struct thermal_cooling_device *pos2; + bool upper_no_limit; int result; if (trip >= tz->num_trips || trip < 
0) @@ -632,7 +633,13 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, /* lower default 0, upper default max_state */ lower = lower == THERMAL_NO_LIMIT ? 0 : lower; - upper = upper == THERMAL_NO_LIMIT ? cdev->max_state : upper; + + if (upper == THERMAL_NO_LIMIT) { + upper = cdev->max_state; + upper_no_limit = true; + } else { + upper_no_limit = false; + } if (lower > upper || upper > cdev->max_state) return -EINVAL; @@ -644,6 +651,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, dev->cdev = cdev; dev->trip = trip; dev->upper = upper; + dev->upper_no_limit = upper_no_limit; dev->lower = lower; dev->target = THERMAL_NO_TARGET; dev->weight = weight; @@ -1045,6 +1053,91 @@ devm_thermal_of_cooling_device_register(struct device *dev, } EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register); +static bool thermal_cooling_device_present(struct thermal_cooling_device *cdev) +{ + struct thermal_cooling_device *pos = NULL; + + list_for_each_entry(pos, &thermal_cdev_list, node) { + if (pos == cdev) + return true; + } + + return false; +} + +/** + * thermal_cooling_device_update - Update a cooling device object + * @cdev: Target cooling device. + * + * Update @cdev to reflect a change of the underlying hardware or platform. + * + * Must be called when the maximum cooling state of @cdev becomes invalid and so + * its .get_max_state() callback needs to be run to produce the new maximum + * cooling state value. + */ +void thermal_cooling_device_update(struct thermal_cooling_device *cdev) +{ + struct thermal_instance *ti; + unsigned long state; + + if (IS_ERR_OR_NULL(cdev)) + return; + + /* + * Hold thermal_list_lock throughout the update to prevent the device + * from going away while being updated. + */ + mutex_lock(&thermal_list_lock); + + if (!thermal_cooling_device_present(cdev)) + goto unlock_list; + + /* + * Update under the cdev lock to prevent the state from being set beyond + * the new limit concurrently. 
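Illustrative aside, not part of the patch text above: the thermal_core.c hunk records whether a cooling-device binding was created with THERMAL_NO_LIMIT as its upper bound, so that thermal_cooling_device_update() can later re-expand unconstrained instances while only clamping explicitly bounded ones. A small sketch of that bookkeeping; the names and the NO_LIMIT stand-in are illustrative, not the kernel's:

#include <stdbool.h>

#define NO_LIMIT ((unsigned long)-1)   /* stand-in for THERMAL_NO_LIMIT */

struct instance {
	unsigned long upper;
	bool upper_no_limit;
};

static void bind_upper(struct instance *ti, unsigned long requested,
		       unsigned long max_state)
{
	ti->upper_no_limit = (requested == NO_LIMIT);
	ti->upper = ti->upper_no_limit ? max_state : requested;
}

static void max_state_changed(struct instance *ti, unsigned long new_max)
{
	/* unconstrained bindings track the new maximum in both directions ... */
	if (ti->upper_no_limit || ti->upper > new_max)
		ti->upper = new_max;
	/* ... explicitly bounded ones are only clamped downwards */
}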
+ */ + mutex_lock(&cdev->lock); + + if (cdev->ops->get_max_state(cdev, &cdev->max_state)) + goto unlock; + + thermal_cooling_device_stats_reinit(cdev); + + list_for_each_entry(ti, &cdev->thermal_instances, cdev_node) { + if (ti->upper == cdev->max_state) + continue; + + if (ti->upper < cdev->max_state) { + if (ti->upper_no_limit) + ti->upper = cdev->max_state; + + continue; + } + + ti->upper = cdev->max_state; + if (ti->lower > ti->upper) + ti->lower = ti->upper; + + if (ti->target == THERMAL_NO_TARGET) + continue; + + if (ti->target > ti->upper) + ti->target = ti->upper; + } + + if (cdev->ops->get_cur_state(cdev, &state) || state > cdev->max_state) + goto unlock; + + thermal_cooling_device_stats_update(cdev, state); + +unlock: + mutex_unlock(&cdev->lock); + +unlock_list: + mutex_unlock(&thermal_list_lock); +} +EXPORT_SYMBOL_GPL(thermal_cooling_device_update); + static void __unbind(struct thermal_zone_device *tz, int mask, struct thermal_cooling_device *cdev) { @@ -1067,20 +1160,17 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) int i; const struct thermal_zone_params *tzp; struct thermal_zone_device *tz; - struct thermal_cooling_device *pos = NULL; if (!cdev) return; mutex_lock(&thermal_list_lock); - list_for_each_entry(pos, &thermal_cdev_list, node) - if (pos == cdev) - break; - if (pos != cdev) { - /* thermal cooling device not found */ + + if (!thermal_cooling_device_present(cdev)) { mutex_unlock(&thermal_list_lock); return; } + list_del(&cdev->node); /* Unbind all thermal zones associated with 'this' cdev */ @@ -1309,7 +1399,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t struct thermal_trip trip; result = thermal_zone_get_trip(tz, count, &trip); - if (result) + if (result || !trip.temperature) set_bit(count, &tz->trips_disabled); } diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 7af54382e915..3d4a787c6b28 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -101,6 +101,7 @@ struct thermal_instance { struct list_head tz_node; /* node in tz->thermal_instances */ struct list_head cdev_node; /* node in cdev->thermal_instances */ unsigned int weight; /* The weight of the cooling device */ + bool upper_no_limit; }; #define to_thermal_zone(_dev) \ @@ -127,6 +128,7 @@ int thermal_zone_create_device_groups(struct thermal_zone_device *, int); void thermal_zone_destroy_device_groups(struct thermal_zone_device *); void thermal_cooling_device_setup_sysfs(struct thermal_cooling_device *); void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev); +void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev); /* used only at binding time */ ssize_t trip_point_show(struct device *, struct device_attribute *, char *); ssize_t weight_show(struct device *, struct device_attribute *, char *); diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index cef860deaf91..a4aba7b8bb8b 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -685,6 +685,8 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, { struct cooling_dev_stats *stats = cdev->stats; + lockdep_assert_held(&cdev->lock); + if (!stats) return; @@ -706,13 +708,22 @@ static ssize_t total_trans_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); - struct cooling_dev_stats *stats = cdev->stats; - int ret; + struct 
cooling_dev_stats *stats; + int ret = 0; + + mutex_lock(&cdev->lock); + + stats = cdev->stats; + if (!stats) + goto unlock; spin_lock(&stats->lock); ret = sprintf(buf, "%u\n", stats->total_trans); spin_unlock(&stats->lock); +unlock: + mutex_unlock(&cdev->lock); + return ret; } @@ -721,11 +732,18 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); - struct cooling_dev_stats *stats = cdev->stats; + struct cooling_dev_stats *stats; ssize_t len = 0; int i; + mutex_lock(&cdev->lock); + + stats = cdev->stats; + if (!stats) + goto unlock; + spin_lock(&stats->lock); + update_time_in_state(stats); for (i = 0; i <= cdev->max_state; i++) { @@ -734,6 +752,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr, } spin_unlock(&stats->lock); +unlock: + mutex_unlock(&cdev->lock); + return len; } @@ -742,8 +763,16 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct thermal_cooling_device *cdev = to_cooling_device(dev); - struct cooling_dev_stats *stats = cdev->stats; - int i, states = cdev->max_state + 1; + struct cooling_dev_stats *stats; + int i, states; + + mutex_lock(&cdev->lock); + + stats = cdev->stats; + if (!stats) + goto unlock; + + states = cdev->max_state + 1; spin_lock(&stats->lock); @@ -757,6 +786,9 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf, spin_unlock(&stats->lock); +unlock: + mutex_unlock(&cdev->lock); + return count; } @@ -764,10 +796,18 @@ static ssize_t trans_table_show(struct device *dev, struct device_attribute *attr, char *buf) { struct thermal_cooling_device *cdev = to_cooling_device(dev); - struct cooling_dev_stats *stats = cdev->stats; + struct cooling_dev_stats *stats; ssize_t len = 0; int i, j; + mutex_lock(&cdev->lock); + + stats = cdev->stats; + if (!stats) { + len = -ENODATA; + goto unlock; + } + len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); len += snprintf(buf + len, PAGE_SIZE - len, " : "); for (i = 0; i <= cdev->max_state; i++) { @@ -775,8 +815,10 @@ static ssize_t trans_table_show(struct device *dev, break; len += snprintf(buf + len, PAGE_SIZE - len, "state%2u ", i); } - if (len >= PAGE_SIZE) - return PAGE_SIZE; + if (len >= PAGE_SIZE) { + len = PAGE_SIZE; + goto unlock; + } len += snprintf(buf + len, PAGE_SIZE - len, "\n"); @@ -799,8 +841,12 @@ static ssize_t trans_table_show(struct device *dev, if (len >= PAGE_SIZE) { pr_warn_once("Thermal transition table exceeds PAGE_SIZE. 
Disabling\n"); - return -EFBIG; + len = -EFBIG; } + +unlock: + mutex_unlock(&cdev->lock); + return len; } @@ -830,6 +876,8 @@ static void cooling_device_stats_setup(struct thermal_cooling_device *cdev) unsigned long states = cdev->max_state + 1; int var; + lockdep_assert_held(&cdev->lock); + var = sizeof(*stats); var += sizeof(*stats->time_in_state) * states; var += sizeof(*stats->trans_table) * states * states; @@ -855,6 +903,8 @@ out: static void cooling_device_stats_destroy(struct thermal_cooling_device *cdev) { + lockdep_assert_held(&cdev->lock); + kfree(cdev->stats); cdev->stats = NULL; } @@ -879,6 +929,12 @@ void thermal_cooling_device_destroy_sysfs(struct thermal_cooling_device *cdev) cooling_device_stats_destroy(cdev); } +void thermal_cooling_device_stats_reinit(struct thermal_cooling_device *cdev) +{ + cooling_device_stats_destroy(cdev); + cooling_device_stats_setup(cdev); +} + /* these helper will be used only at the time of bindig */ ssize_t trip_point_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 4339e706cc3a..f92ad71ef983 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port) snprintf(dir_name, sizeof(dir_name), "port%d", port->port); parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); - debugfs_remove_recursive(debugfs_lookup("margining", parent)); + if (parent) + debugfs_remove_recursive(debugfs_lookup("margining", parent)); kfree(port->usb4->margining); port->usb4->margining = NULL; @@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw) static void margining_switch_remove(struct tb_switch *sw) { + struct tb_port *upstream, *downstream; struct tb_switch *parent_sw; - struct tb_port *downstream; u64 route = tb_route(sw); if (!route) return; - /* - * Upstream is removed with the router itself but we need to - * remove the downstream port margining directory. - */ + upstream = tb_upstream_port(sw); parent_sw = tb_switch_parent(sw); downstream = tb_port_at(route, parent_sw); + + margining_port_remove(upstream); margining_port_remove(downstream); } diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 4dce2edd86ea..cfebec107f3f 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -46,7 +46,7 @@ #define QUIRK_AUTO_CLEAR_INT BIT(0) #define QUIRK_E2E BIT(1) -static int ring_interrupt_index(struct tb_ring *ring) +static int ring_interrupt_index(const struct tb_ring *ring) { int bit = ring->hop; if (!ring->is_tx) @@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) { int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32 * 4; - int bit = ring_interrupt_index(ring) & 31; - int mask = 1 << bit; + int interrupt_bit = ring_interrupt_index(ring) & 31; + int mask = 1 << interrupt_bit; u32 old, new; if (ring->irq > 0) { u32 step, shift, ivr, misc; void __iomem *ivr_base; + int auto_clear_bit; int index; if (ring->is_tx) @@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) else index = ring->hop + ring->nhi->hop_count; - if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) { - /* - * Ask the hardware to clear interrupt status - * bits automatically since we already know - * which interrupt was triggered. 
- */ - misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); - if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) { - misc |= REG_DMA_MISC_INT_AUTO_CLEAR; - iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); - } - } + /* + * Intel routers support a bit that isn't part of + * the USB4 spec to ask the hardware to clear + * interrupt status bits automatically since + * we already know which interrupt was triggered. + * + * Other routers explicitly disable auto-clear + * to prevent conditions that may occur where two + * MSIX interrupts are simultaneously active and + * reading the register clears both of them. + */ + misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) + auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR; + else + auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR; + if (!(misc & auto_clear_bit)) + iowrite32(misc | auto_clear_bit, + ring->nhi->iobase + REG_DMA_MISC); ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; @@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) dev_dbg(&ring->nhi->pdev->dev, "%s interrupt at register %#x bit %d (%#x -> %#x)\n", - active ? "enabling" : "disabling", reg, bit, old, new); + active ? "enabling" : "disabling", reg, interrupt_bit, old, new); if (new == old) dev_WARN(&ring->nhi->pdev->dev, @@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete); static void ring_clear_msix(const struct tb_ring *ring) { + int bit; + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) return; + bit = ring_interrupt_index(ring) & 31; if (ring->is_tx) - ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE); + iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR); else - ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE + - 4 * (ring->nhi->hop_count / 32)); + iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR + + 4 * (ring->nhi->hop_count / 32)); } static irqreturn_t ring_msix(int irq, void *data) diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h index 0d4970dcef84..faef165a919c 100644 --- a/drivers/thunderbolt/nhi_regs.h +++ b/drivers/thunderbolt/nhi_regs.h @@ -77,12 +77,13 @@ struct ring_desc { /* * three bitfields: tx, rx, rx overflow - * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are - * cleared on read. New interrupts are fired only after ALL registers have been + * Every bitfield contains one bit for every hop (REG_HOP_COUNT). + * New interrupts are fired only after ALL registers have been * read (even those containing only disabled rings). 
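Illustrative aside, not part of the patch text above: the nhi.c changes derive a (register, bit) pair from a flat interrupt index: one 32-bit register covers 32 indices, so the register offset advances by 4 bytes per 32 indices and the bit is the index modulo 32; ring_clear_msix() then writes BIT(bit) to the matching clear register. A compilable sketch of the arithmetic, with a made-up base offset rather than a real NHI MMIO address:

#include <stdint.h>
#include <stdio.h>

#define INT_BASE 0x100u   /* illustrative only, not a real register offset */

static void index_to_reg_bit(unsigned int index, uint32_t *reg, unsigned int *bit)
{
	*reg = INT_BASE + (index / 32) * 4;   /* 4 bytes per group of 32 bits */
	*bit = index & 31;                    /* position within that register */
}

int main(void)
{
	uint32_t reg;
	unsigned int bit;

	index_to_reg_bit(45, &reg, &bit);
	/* index 45 -> second register (base + 4), bit 13, mask 0x2000 */
	printf("reg=%#x bit=%u mask=%#x\n", reg, bit, 1u << bit);
	return 0;
}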
*/ #define REG_RING_NOTIFY_BASE 0x37800 #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32) +#define REG_RING_INT_CLEAR 0x37808 /* * two bitfields: rx, tx @@ -105,6 +106,7 @@ struct ring_desc { #define REG_DMA_MISC 0x39864 #define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2) +#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17) #define REG_INMAIL_DATA 0x39900 diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c index b5f2ec79c4d6..1157b8869bcc 100644 --- a/drivers/thunderbolt/quirks.c +++ b/drivers/thunderbolt/quirks.c @@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw) } } +static void quirk_clx_disable(struct tb_switch *sw) +{ + sw->quirks |= QUIRK_NO_CLX; + tb_sw_dbg(sw, "disabling CL states\n"); +} + +static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_usb3_down(port)) + continue; + port->max_bw = 16376; + tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n", + port->max_bw); + } +} + struct tb_quirk { u16 hw_vendor_id; u16 hw_device_id; @@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = { * DP buffers. */ { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation }, + /* + * Limit the maximum USB3 bandwidth for the following Intel USB4 + * host routers due to a hardware issue. + */ + { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + /* + * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms. 
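Illustrative aside, not part of the patch text above: the tb_quirks[] additions follow the table-driven quirk pattern: each entry names hardware IDs and a fixup hook, and tb_check_quirks() invokes the hook for every matching router. A simplified, generic version with made-up IDs; the driver's real table matches on four ID fields rather than the two shown here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct device_ids { uint16_t vendor, device; };

struct quirk {
	uint16_t vendor, device;   /* 0 acts as a wildcard here */
	void (*hook)(struct device_ids *dev);
};

static void limit_usb3_bw(struct device_ids *dev)
{
	printf("%04x:%04x: limiting USB3 bandwidth\n", dev->vendor, dev->device);
}

static const struct quirk quirks[] = {
	{ 0x1234, 0x5678, limit_usb3_bw },   /* IDs made up for the example */
};

static void check_quirks(struct device_ids *dev)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->vendor && q->vendor != dev->vendor)
			continue;
		if (q->device && q->device != dev->device)
			continue;
		q->hook(dev);
	}
}

int main(void)
{
	struct device_ids dev = { 0x1234, 0x5678 };

	check_quirks(&dev);
	return 0;
}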
+ */ + { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable }, }; /** diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 56008eb91e2e..9cc28197dbc4 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev, return ret; } +static void tb_retimer_set_inbound_sbtx(struct tb_port *port) +{ + int i; + + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_set_inbound_sbtx(port, i); +} + +static void tb_retimer_unset_inbound_sbtx(struct tb_port *port) +{ + int i; + + for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) + usb4_port_retimer_unset_inbound_sbtx(port, i); +} + static ssize_t nvm_authenticate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev, rt->auth_status = 0; if (val) { + tb_retimer_set_inbound_sbtx(rt->port); if (val == AUTHENTICATE_ONLY) { ret = tb_retimer_nvm_authenticate(rt, true); } else { @@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev, } exit_unlock: + tb_retimer_unset_inbound_sbtx(rt->port); mutex_unlock(&rt->tb->lock); exit_rpm: pm_runtime_mark_last_busy(&rt->dev); @@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add) * Enable sideband channel for each retimer. We can do this * regardless whether there is device connected or not. */ - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) - usb4_port_retimer_set_inbound_sbtx(port, i); + tb_retimer_set_inbound_sbtx(port); /* * Before doing anything else, read the authentication status. 
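Illustrative aside, not part of the patch text above: the retimer.c hunk introduces paired helpers: tb_retimer_set_inbound_sbtx() enables the sideband channel for retimer indices 1..TB_MAX_RETIMER_INDEX, and tb_retimer_unset_inbound_sbtx() disables them again in reverse order, so teardown mirrors setup. A trivial sketch of that pairing; MAX_INDEX and the stubs are stand-ins, not the driver's symbols:

#include <stdio.h>

#define MAX_INDEX 6   /* stand-in for TB_MAX_RETIMER_INDEX */

static void sbtx_enable(int i)  { printf("enable  retimer %d\n", i); }
static void sbtx_disable(int i) { printf("disable retimer %d\n", i); }

static void set_inbound_sbtx(void)
{
	for (int i = 1; i <= MAX_INDEX; i++)
		sbtx_enable(i);
}

static void unset_inbound_sbtx(void)
{
	/* walk backwards so the last index enabled is the first disabled */
	for (int i = MAX_INDEX; i >= 1; i--)
		sbtx_disable(i);
}

int main(void)
{
	set_inbound_sbtx();
	/* ... sideband transactions would happen here ... */
	unset_inbound_sbtx();
	return 0;
}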
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add) break; } + tb_retimer_unset_inbound_sbtx(port); + if (!last_idx) return 0; diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h index 5185cf3e4d97..f37a4320f10a 100644 --- a/drivers/thunderbolt/sb_regs.h +++ b/drivers/thunderbolt/sb_regs.h @@ -20,6 +20,7 @@ enum usb4_sb_opcode { USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */ USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */ USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */ + USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */ USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */ USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */ USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */ diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 3370e18ba05f..da373ac38285 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2968,8 +2968,6 @@ int tb_switch_add(struct tb_switch *sw) dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); - tb_check_quirks(sw); - ret = tb_switch_set_uuid(sw); if (ret) { dev_err(&sw->dev, "failed to set UUID\n"); @@ -2988,6 +2986,8 @@ int tb_switch_add(struct tb_switch *sw) } } + tb_check_quirks(sw); + tb_switch_default_link_ports(sw); ret = tb_switch_update_link_attributes(sw); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index cbb20a277346..275ff5219a3a 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -23,6 +23,11 @@ #define NVM_MAX_SIZE SZ_512K #define NVM_DATA_DWORDS 16 +/* Keep link controller awake during update */ +#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) +/* Disable CLx if not supported */ +#define QUIRK_NO_CLX BIT(1) + /** * struct tb_nvm - Structure holding NVM information * @dev: Owner of the NVM @@ -267,6 +272,8 @@ struct tb_bandwidth_group { * @group: Bandwidth allocation group the adapter is assigned to. Only * used for DP IN adapters for now. * @group_list: The adapter is linked to the group's list of ports through this + * @max_bw: Maximum possible bandwidth through this adapter if set to + * non-zero. * * In USB4 terminology this structure represents an adapter (protocol or * lane adapter). 
@@ -294,6 +301,7 @@ struct tb_port { unsigned int dma_credits; struct tb_bandwidth_group *group; struct list_head group_list; + unsigned int max_bw; }; /** @@ -1019,6 +1027,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw, */ static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw) { + if (sw->quirks & QUIRK_NO_CLX) + return false; + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); } @@ -1234,6 +1245,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors); int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); +int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, u8 size); int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, @@ -1291,9 +1303,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port); void usb4_port_device_remove(struct usb4_port *usb4); int usb4_port_device_resume(struct usb4_port *usb4); -/* Keep link controller awake during update */ -#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) - void tb_check_quirks(struct tb_switch *sw); #ifdef CONFIG_ACPI diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 1e5e9c147a31..a0996cb2893c 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -1579,6 +1579,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index) } /** + * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions + * @port: USB4 port + * @index: Retimer index + * + * Disables sideband channel transations on SBTX. The reverse of + * usb4_port_retimer_set_inbound_sbtx(). + */ +int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index) +{ + return usb4_port_retimer_op(port, index, + USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500); +} + +/** * usb4_port_retimer_read() - Read from retimer sideband registers * @port: USB4 port * @index: Retimer index @@ -1868,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, usb4_port_retimer_nvm_read_block, &info); } +static inline unsigned int +usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw) +{ + /* Take the possible bandwidth limitation into account */ + if (port->max_bw) + return min(bw, port->max_bw); + return bw; +} + /** * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate * @port: USB3 adapter port @@ -1889,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port) return ret; lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT; - return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000; + ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000; + + return usb4_usb3_port_max_bandwidth(port, ret); } /** @@ -1916,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port) return 0; lr = val & ADP_USB3_CS_4_ALR_MASK; - return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000; + ret = lr == ADP_USB3_CS_4_ALR_20G ? 
20000 : 10000; + + return usb4_usb3_port_max_bandwidth(port, ret); } static int usb4_usb3_port_cm_request(struct tb_port *port, bool request) @@ -2067,18 +2094,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port, int downstream_bw) { u32 val, ubw, dbw, scale; - int ret; + int ret, max_bw; - /* Read the used scale, hardware default is 0 */ - ret = tb_port_read(port, &scale, TB_CFG_PORT, - port->cap_adap + ADP_USB3_CS_3, 1); + /* Figure out suitable scale */ + scale = 0; + max_bw = max(upstream_bw, downstream_bw); + while (scale < 64) { + if (mbps_to_usb3_bw(max_bw, scale) < 4096) + break; + scale++; + } + + if (WARN_ON(scale >= 64)) + return -EINVAL; + + ret = tb_port_write(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); if (ret) return ret; - scale &= ADP_USB3_CS_3_SCALE_MASK; ubw = mbps_to_usb3_bw(upstream_bw, scale); dbw = mbps_to_usb3_bw(downstream_bw, scale); + tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale); + ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_adap + ADP_USB3_CS_2, 1); if (ret) diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 5bddb2f5e931..98764e740c07 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -43,6 +43,7 @@ struct xencons_info { int irq; int vtermno; grant_ref_t gntref; + spinlock_t ring_lock; }; static LIST_HEAD(xenconsoles); @@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons, XENCONS_RING_IDX cons, prod; struct xencons_interface *intf = xencons->intf; int sent = 0; + unsigned long flags; + spin_lock_irqsave(&xencons->ring_lock, flags); cons = intf->out_cons; prod = intf->out_prod; mb(); /* update queue values before going on */ if ((prod - cons) > sizeof(intf->out)) { + spin_unlock_irqrestore(&xencons->ring_lock, flags); pr_err_once("xencons: Illegal ring page indices"); return -EINVAL; } @@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons, wmb(); /* write ring before updating pointer */ intf->out_prod = prod; + spin_unlock_irqrestore(&xencons->ring_lock, flags); if (sent) notify_daemon(xencons); @@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) int recv = 0; struct xencons_info *xencons = vtermno_to_xencons(vtermno); unsigned int eoiflag = 0; + unsigned long flags; if (xencons == NULL) return -EINVAL; intf = xencons->intf; + spin_lock_irqsave(&xencons->ring_lock, flags); cons = intf->in_cons; prod = intf->in_prod; mb(); /* get pointers before reading ring */ if ((prod - cons) > sizeof(intf->in)) { + spin_unlock_irqrestore(&xencons->ring_lock, flags); pr_err_once("xencons: Illegal ring page indices"); return -EINVAL; } @@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len) xencons->out_cons = intf->out_cons; xencons->out_cons_same = 0; } + if (!recv && xencons->out_cons_same++ > 1) { + eoiflag = XEN_EOI_FLAG_SPURIOUS; + } + spin_unlock_irqrestore(&xencons->ring_lock, flags); + if (recv) { notify_daemon(xencons); - } else if (xencons->out_cons_same++ > 1) { - eoiflag = XEN_EOI_FLAG_SPURIOUS; } xen_irq_lateeoi(xencons->irq, eoiflag); @@ -239,6 +250,7 @@ static int xen_hvm_console_init(void) info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); } else if (info->intf != NULL) { /* already configured */ return 0; @@ -275,6 +287,7 @@ err: static int xencons_info_pv_init(struct xencons_info *info, int vtermno) { + spin_lock_init(&info->ring_lock); info->evtchn = 
xen_start_info->console.domU.evtchn; /* GFN == MFN for PV guest */ info->intf = gfn_to_virt(xen_start_info->console.domU.mfn); @@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void) info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); } info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false); @@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev, info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL); if (!info) return -ENOMEM; + spin_lock_init(&info->ring_lock); dev_set_drvdata(&dev->dev, info); info->xbdev = dev; info->vtermno = xenbus_devid_to_vtermno(devid); diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index aa80de3a8194..678014253b7b 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -534,7 +534,7 @@ static int of_serdev_register_devices(struct serdev_controller *ctrl) if (!serdev) continue; - serdev->dev.of_node = node; + device_set_node(&serdev->dev, of_fwnode_handle(node)); err = serdev_device_add(serdev); if (err) { diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c index f8e99995eee9..d94c3811a8f7 100644 --- a/drivers/tty/serial/8250/8250_em.c +++ b/drivers/tty/serial/8250/8250_em.c @@ -106,8 +106,8 @@ static int serial8250_em_probe(struct platform_device *pdev) memset(&up, 0, sizeof(up)); up.port.mapbase = regs->start; up.port.irq = irq; - up.port.type = PORT_UNKNOWN; - up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP; + up.port.type = PORT_16750; + up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE; up.port.dev = &pdev->dev; up.port.private_data = priv; diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index 8aad15622a2e..8adfaa183f77 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c @@ -34,7 +34,7 @@ int fsl8250_handle_irq(struct uart_port *port) iir = port->serial_in(port, UART_IIR); if (iir & UART_IIR_NO_INT) { - spin_unlock(&up->port.lock); + spin_unlock_irqrestore(&up->port.lock, flags); return 0; } @@ -42,7 +42,7 @@ int fsl8250_handle_irq(struct uart_port *port) if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) { up->lsr_saved_flags &= ~UART_LSR_BI; port->serial_in(port, UART_RX); - spin_unlock(&up->port.lock); + spin_unlock_irqrestore(&up->port.lock, flags); return 1; } diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 978dc196c29b..5313aa31930f 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -257,8 +257,9 @@ config SERIAL_8250_ASPEED_VUART tristate "Aspeed Virtual UART" depends on SERIAL_8250 depends on OF - depends on REGMAP && MFD_SYSCON + depends on MFD_SYSCON depends on ARCH_ASPEED || COMPILE_TEST + select REGMAP help If you want to use the virtual UART (VUART) device on Aspeed BMC platforms, enable this option. 
This enables the 16550A- @@ -299,7 +300,6 @@ config SERIAL_8250_PCI1XXXX tristate "Microchip 8250 based serial port" depends on SERIAL_8250 && PCI select SERIAL_8250_PCILIB - default SERIAL_8250 help Select this option if you have a setup with Microchip PCIe Switch with serial port enabled and wish to enable 8250 diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 625358f44419..0072892ca7fc 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1313,7 +1313,7 @@ config SERIAL_FSL_LPUART config SERIAL_FSL_LPUART_CONSOLE bool "Console on Freescale lpuart serial port" - depends on SERIAL_FSL_LPUART + depends on SERIAL_FSL_LPUART=y select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON help diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index e945f41b93d4..56e6ba3250cd 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1354,6 +1354,7 @@ static void lpuart_dma_rx_free(struct uart_port *port) struct dma_chan *chan = sport->dma_rx_chan; dmaengine_terminate_sync(chan); + del_timer_sync(&sport->lpuart_timer); dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE); kfree(sport->rx_ring.buf); sport->rx_ring.tail = 0; @@ -1813,7 +1814,6 @@ static int lpuart32_startup(struct uart_port *port) static void lpuart_dma_shutdown(struct lpuart_port *sport) { if (sport->lpuart_dma_rx_use) { - del_timer_sync(&sport->lpuart_timer); lpuart_dma_rx_free(&sport->port); sport->lpuart_dma_rx_use = false; } @@ -1973,10 +1973,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios, * Since timer function acqures sport->port.lock, need to stop before * acquring same lock because otherwise del_timer_sync() can deadlock. */ - if (old && sport->lpuart_dma_rx_use) { - del_timer_sync(&sport->lpuart_timer); + if (old && sport->lpuart_dma_rx_use) lpuart_dma_rx_free(&sport->port); - } spin_lock_irqsave(&sport->port.lock, flags); @@ -2210,10 +2208,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, * Since timer function acqures sport->port.lock, need to stop before * acquring same lock because otherwise del_timer_sync() can deadlock. */ - if (old && sport->lpuart_dma_rx_use) { - del_timer_sync(&sport->lpuart_timer); + if (old && sport->lpuart_dma_rx_use) lpuart_dma_rx_free(&sport->port); - } spin_lock_irqsave(&sport->port.lock, flags); @@ -2240,9 +2236,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios, /* update the per-port timeout */ uart_update_timeout(port, termios->c_cflag, baud); - /* wait transmit engin complete */ - lpuart32_write(&sport->port, 0, UARTMODIR); - lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + /* + * LPUART Transmission Complete Flag may never be set while queuing a break + * character, so skip waiting for transmission complete when UARTCTRL_SBK is + * asserted. + */ + if (!(old_ctrl & UARTCTRL_SBK)) { + lpuart32_write(&sport->port, 0, UARTMODIR); + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + } /* disable transmit and receive */ lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE), @@ -3014,7 +3016,6 @@ static int lpuart_suspend(struct device *dev) * cannot resume as expected, hence gracefully release the * Rx DMA path before suspend and start Rx DMA path on resume. 
*/ - del_timer_sync(&sport->lpuart_timer); lpuart_dma_rx_free(&sport->port); /* Disable Rx DMA to use UART port as wakeup source */ diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index d69592e5e2ec..28fbc927a546 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -596,7 +596,7 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport) if (!qcom_geni_serial_main_active(uport)) return; - if (port->rx_dma_addr) { + if (port->tx_dma_addr) { geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr, port->tx_remaining); port->tx_dma_addr = 0; @@ -631,9 +631,8 @@ static void qcom_geni_serial_start_tx_dma(struct uart_port *uport) if (port->tx_dma_addr) return; - xmit_size = uart_circ_chars_pending(xmit); - if (xmit_size < WAKEUP_CHARS) - uart_write_wakeup(uport); + if (uart_circ_empty(xmit)) + return; xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); @@ -1070,6 +1069,10 @@ static int setup_fifos(struct qcom_geni_serial_port *port) static void qcom_geni_serial_shutdown(struct uart_port *uport) { disable_irq(uport->irq); + + if (uart_console(uport)) + return; + qcom_geni_serial_stop_tx(uport); qcom_geni_serial_stop_rx(uport); } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 57a5c23b51d4..3c2ea9c098f7 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4545,6 +4545,9 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) int c; unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32; + if (vpitch > max_font_height) + return -EINVAL; + if (op->data) { font.data = kvmalloc(max_font_size, GFP_KERNEL); if (!font.data) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 05eac965ee27..37e178a9ac47 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1500,7 +1500,7 @@ start_window: scaling->window_start_t = curr_t; scaling->tot_busy_t = 0; - if (hba->outstanding_reqs) { + if (scaling->active_reqs) { scaling->busy_start_t = curr_t; scaling->is_busy_started = true; } else { @@ -2118,7 +2118,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_scaling.active_reqs--; - if (!hba->outstanding_reqs && scaling->is_busy_started) { + if (!scaling->active_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), scaling->busy_start_t)); scaling->busy_start_t = 0; diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c index deeea618ba33..1f6320d98a76 100644 --- a/drivers/usb/cdns3/cdns3-pci-wrap.c +++ b/drivers/usb/cdns3/cdns3-pci-wrap.c @@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev) return NULL; } + if (func->devfn != PCI_DEV_FN_HOST_DEVICE && + func->devfn != PCI_DEV_FN_OTG) { + return NULL; + } + return func; } diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c index 9b8325f82499..d63d5d92f255 100644 --- a/drivers/usb/cdns3/cdnsp-ep0.c +++ b/drivers/usb/cdns3/cdnsp-ep0.c @@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev, case USB_REQ_SET_ISOCH_DELAY: ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl); break; - case USB_REQ_SET_INTERFACE: - /* - * Add request into pending list to block sending status stage - * by libcomposite. 
- */ - list_add_tail(&pdev->ep0_preq.list, - &pdev->ep0_preq.pep->pending_list); - - ret = cdnsp_ep0_delegate_req(pdev, ctrl); - if (ret == -EBUSY) - ret = 0; - - list_del(&pdev->ep0_preq.list); - break; default: ret = cdnsp_ep0_delegate_req(pdev, ctrl); break; @@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev) else ret = cdnsp_ep0_delegate_req(pdev, ctrl); - if (!len) - pdev->ep0_stage = CDNSP_STATUS_STAGE; - if (ret == USB_GADGET_DELAYED_STATUS) { trace_cdnsp_ep0_status_stage("delayed"); return; @@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev) out: if (ret < 0) cdnsp_ep0_stall(pdev); - else if (pdev->ep0_stage == CDNSP_STATUS_STAGE) + else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE) cdnsp_status_stage(pdev); } diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index efd54ed918b9..7b151f5af3cc 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -29,30 +29,23 @@ #define PLAT_DRIVER_NAME "cdns-usbssp" #define CDNS_VENDOR_ID 0x17cd -#define CDNS_DEVICE_ID 0x0100 +#define CDNS_DEVICE_ID 0x0200 +#define CDNS_DRD_ID 0x0100 #define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80) static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) { - struct pci_dev *func; - /* * Gets the second function. - * It's little tricky, but this platform has two function. - * The fist keeps resources for Host/Device while the second - * keeps resources for DRD/OTG. + * Platform has two function. The fist keeps resources for + * Host/Device while the secon keeps resources for DRD/OTG. */ - func = pci_get_device(pdev->vendor, pdev->device, NULL); - if (!func) - return NULL; + if (pdev->device == CDNS_DEVICE_ID) + return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL); + else if (pdev->device == CDNS_DRD_ID) + return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL); - if (func->devfn == pdev->devfn) { - func = pci_get_device(pdev->vendor, pdev->device, func); - if (!func) - return NULL; - } - - return func; + return NULL; } static int cdnsp_pci_probe(struct pci_dev *pdev, @@ -230,6 +223,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = { PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID }, { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, CDNS_DRD_IF, PCI_ANY_ID }, + { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID, + CDNS_DRD_IF, PCI_ANY_ID }, { 0, } }; diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h index 005c67cb3afb..f210b7489fd5 100644 --- a/drivers/usb/chipidea/ci.h +++ b/drivers/usb/chipidea/ci.h @@ -208,6 +208,7 @@ struct hw_bank { * @in_lpm: if the core in low power mode * @wakeup_int: if wakeup interrupt occur * @rev: The revision number for controller + * @mutex: protect code from concorrent running when doing role switch */ struct ci_hdrc { struct device *dev; @@ -260,6 +261,7 @@ struct ci_hdrc { bool in_lpm; bool wakeup_int; enum ci_revision rev; + struct mutex mutex; }; static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 27c601296130..281fc51720ce 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -984,9 +984,16 @@ static ssize_t role_store(struct device *dev, strlen(ci->roles[role]->name))) break; - if (role == CI_ROLE_END || role == ci->role) + if (role == CI_ROLE_END) return -EINVAL; + mutex_lock(&ci->mutex); + + if (role == ci->role) { + mutex_unlock(&ci->mutex); + return n; + } + pm_runtime_get_sync(dev); disable_irq(ci->irq); 
ci_role_stop(ci); @@ -995,6 +1002,7 @@ static ssize_t role_store(struct device *dev, ci_handle_vbus_change(ci); enable_irq(ci->irq); pm_runtime_put_sync(dev); + mutex_unlock(&ci->mutex); return (ret == 0) ? n : ret; } @@ -1030,6 +1038,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) return -ENOMEM; spin_lock_init(&ci->lock); + mutex_init(&ci->mutex); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c index 622c3b68aa1e..f5490f2a5b6b 100644 --- a/drivers/usb/chipidea/otg.c +++ b/drivers/usb/chipidea/otg.c @@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci) void ci_handle_id_switch(struct ci_hdrc *ci) { - enum ci_role role = ci_otg_role(ci); + enum ci_role role; + mutex_lock(&ci->mutex); + role = ci_otg_role(ci); if (role != ci->role) { dev_dbg(ci->dev, "switching from %s to %s\n", ci_role(ci)->name, ci->roles[role]->name); @@ -198,6 +200,7 @@ void ci_handle_id_switch(struct ci_hdrc *ci) if (role == CI_ROLE_GADGET) ci_handle_vbus_change(ci); } + mutex_unlock(&ci->mutex); } /** * ci_otg_work - perform otg (vbus/id) event handle diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index d8d6493bc457..a8605b02115b 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg) spin_unlock_irqrestore(&hsotg->lock, flags); - dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST)); + dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) || + (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)); } static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid) diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 62fa6378d2d7..8b15742d9e8a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4549,8 +4549,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, hsotg->gadget.dev.of_node = hsotg->dev->of_node; hsotg->gadget.speed = USB_SPEED_UNKNOWN; - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || - (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) { + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) { ret = dwc2_lowlevel_hw_enable(hsotg); if (ret) goto err; @@ -4612,8 +4611,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) if (!IS_ERR_OR_NULL(hsotg->uphy)) otg_set_peripheral(hsotg->uphy->otg, NULL); - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || - (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); return 0; diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 23ef75996823..d1589ba7d322 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg) return 0; } -static void __dwc2_disable_regulators(void *data) -{ - struct dwc2_hsotg *hsotg = data; - - regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); -} - static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) { struct platform_device *pdev = to_platform_device(hsotg->dev); @@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) if (ret) return ret; - ret = devm_add_action_or_reset(&pdev->dev, - __dwc2_disable_regulators, hsotg); - if (ret) - return ret; - if (hsotg->clk) { ret = clk_prepare_enable(hsotg->clk); if (ret) @@ -168,7 +156,7 @@ static int 
__dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg) if (hsotg->clk) clk_disable_unprepare(hsotg->clk); - return 0; + return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); } /** @@ -576,8 +564,7 @@ static int dwc2_driver_probe(struct platform_device *dev) dwc2_debugfs_init(hsotg); /* Gadget code manages lowlevel hw on its own */ - if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL || - (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) + if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) dwc2_lowlevel_hw_disable(hsotg); #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ @@ -608,7 +595,7 @@ error_init: if (hsotg->params.activate_stm_id_vb_detection) regulator_disable(hsotg->usb33d); error: - if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) + if (hsotg->ll_hw_enabled) dwc2_lowlevel_hw_disable(hsotg); return retval; } diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 582ebd9cf9c2..4743e918dcaf 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1098,7 +1098,7 @@ struct dwc3_scratchpad_array { * change quirk. * @dis_tx_ipgap_linecheck_quirk: set if we disable u2mac linestate * check during HS transmit. - * @resume-hs-terminations: Set if we enable quirk for fixing improper crc + * @resume_hs_terminations: Set if we enable quirk for fixing improper crc * generation after resume from suspend. * @parkmode_disable_ss_quirk: set if we need to disable all SuperSpeed * instances in park mode. diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 3c63fa97a680..cf5b4f49c3ed 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1699,6 +1699,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc) */ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt) { + struct dwc3 *dwc = dep->dwc; struct dwc3_gadget_ep_cmd_params params; u32 cmd; int ret; @@ -1722,10 +1723,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int WARN_ON_ONCE(ret); dep->resource_index = 0; - if (!interrupt) + if (!interrupt) { + if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A)) + mdelay(1); dep->flags &= ~DWC3_EP_TRANSFER_STARTED; - else if (!ret) + } else if (!ret) { dep->flags |= DWC3_EP_END_TRANSFER_PENDING; + } dep->flags &= ~DWC3_EP_DELAY_STOP; return ret; @@ -3774,7 +3778,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, * enabled, the EndTransfer command will have completed upon * returning from this function. * - * This mode is NOT available on the DWC_usb31 IP. + * This mode is NOT available on the DWC_usb31 IP. In this + * case, if the IOC bit is not set, then delay by 1ms + * after issuing the EndTransfer command. This allows for the + * controller to handle the command completely before DWC3 + * remove requests attempts to unmap USB request buffers. 
*/ __dwc3_stop_active_transfer(dep, force, interrupt); diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index fa7dd6cf014d..5377d873c08e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2079,10 +2079,9 @@ unknown: sizeof(url_descriptor->URL) - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset); - if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH - + landing_page_length) - landing_page_length = ctrl->wLength - - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset; + if (w_length < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_length) + landing_page_length = w_length + - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset; memcpy(url_descriptor->URL, cdev->landing_page + landing_page_offset, diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c index c1f62e91b012..4a42574b4a7f 100644 --- a/drivers/usb/gadget/function/u_audio.c +++ b/drivers/usb/gadget/function/u_audio.c @@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio) uac = g_audio->uac; card = uac->card; if (card) - snd_card_free(card); + snd_card_free_when_closed(card); kfree(uac->p_prm.reqs); kfree(uac->c_prm.reqs); diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c index 5402e4b7267b..12fc6eb67c3b 100644 --- a/drivers/usb/misc/onboard_usb_hub.c +++ b/drivers/usb/misc/onboard_usb_hub.c @@ -410,6 +410,7 @@ static const struct usb_device_id onboard_hub_id_table[] = { { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */ diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h index 0a943a154649..aca5f50eb0da 100644 --- a/drivers/usb/misc/onboard_usb_hub.h +++ b/drivers/usb/misc/onboard_usb_hub.h @@ -36,6 +36,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = { static const struct of_device_id onboard_hub_match[] = { { .compatible = "usb424,2514", .data = µchip_usb424_data, }, + { .compatible = "usb424,2517", .data = µchip_usb424_data, }, { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, }, diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index c7b763d6d102..1f8c9b16a0fb 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported by: Yaroslav Furman <yaro330@gmail.com> */ +UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999, + "JMicron", + "JMS583Gen 2", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */ UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999, "PNY", diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a0d943d78580..1ee774c263f0 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1445,10 +1445,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum 
tcpm_ams ams) static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header, const u32 *data, int cnt) { + u32 vdo_hdr = port->vdo_data[0]; + WARN_ON(!mutex_is_locked(&port->lock)); - /* Make sure we are not still processing a previous VDM packet */ - WARN_ON(port->vdm_state > VDM_STATE_DONE); + /* If is sending discover_identity, handle received message first */ + if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) { + port->send_discover = true; + mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS); + } else { + /* Make sure we are not still processing a previous VDM packet */ + WARN_ON(port->vdm_state > VDM_STATE_DONE); + } port->vdo_count = cnt + 1; port->vdo_data[0] = header; @@ -1948,11 +1956,13 @@ static void vdm_run_state_machine(struct tcpm_port *port) switch (PD_VDO_CMD(vdo_hdr)) { case CMD_DISCOVER_IDENT: res = tcpm_ams_start(port, DISCOVER_IDENTITY); - if (res == 0) + if (res == 0) { port->send_discover = false; - else if (res == -EAGAIN) + } else if (res == -EAGAIN) { + port->vdo_data[0] = 0; mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS); + } break; case CMD_DISCOVER_SVID: res = tcpm_ams_start(port, DISCOVER_SVIDS); @@ -2035,6 +2045,7 @@ static void vdm_run_state_machine(struct tcpm_port *port) unsigned long timeout; port->vdm_retries = 0; + port->vdo_data[0] = 0; port->vdm_state = VDM_STATE_BUSY; timeout = vdm_ready_timeout(vdo_hdr); mod_vdm_delayed_work(port, timeout); @@ -4570,6 +4581,9 @@ static void run_state_machine(struct tcpm_port *port) case SOFT_RESET: port->message_id = 0; port->rx_msgid = -1; + /* remove existing capabilities */ + usb_power_delivery_unregister_capabilities(port->partner_source_caps); + port->partner_source_caps = NULL; tcpm_pd_send_control(port, PD_CTRL_ACCEPT); tcpm_ams_finish(port); if (port->pwr_role == TYPEC_SOURCE) { @@ -4589,6 +4603,9 @@ static void run_state_machine(struct tcpm_port *port) case SOFT_RESET_SEND: port->message_id = 0; port->rx_msgid = -1; + /* remove existing capabilities */ + usb_power_delivery_unregister_capabilities(port->partner_source_caps); + port->partner_source_caps = NULL; if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET)) tcpm_set_state_cond(port, hard_reset_state(port), 0); else @@ -4718,6 +4735,9 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, SNK_STARTUP, 0); break; case PR_SWAP_SNK_SRC_SINK_OFF: + /* will be source, remove existing capabilities */ + usb_power_delivery_unregister_capabilities(port->partner_source_caps); + port->partner_source_caps = NULL; /* * Prevent vbus discharge circuit from turning on during PR_SWAP * as this is not a disconnect. 
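
[Editor's note, before the ucsi.c changes below: the tcpm.c hunk above keys its new behaviour off the header of the VDM that is still being sent (PD_VDO_SVDM() and PD_VDO_CMD() against CMD_DISCOVER_IDENT). The following stand-alone sketch is not the kernel code; it only illustrates, under the assumption of the standard Structured VDM header layout (SVID in bits 31:16, VDM-type flag in bit 15, command in bits 4:0), what that "is a Discover Identity still in flight?" check amounts to. The VDM_* macro names, CMD_DISC_IDENT value and the example header are illustrative stand-ins, not the driver's identifiers.]

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Assumed Structured VDM header layout (USB PD): */
#define VDM_SVID(hdr)   ((unsigned)((hdr) >> 16))      /* bits 31:16 */
#define VDM_SVDM(hdr)   ((unsigned)(((hdr) >> 15) & 1))/* 1 = structured VDM */
#define VDM_CMD(hdr)    ((unsigned)((hdr) & 0x1f))     /* bits 4:0 */
#define CMD_DISC_IDENT  1

static bool pending_discover_identity(uint32_t vdo_hdr)
{
        /*
         * Mirrors the idea of the new check: only a structured VDM whose
         * command is Discover Identity makes the port re-arm its delayed
         * discover work instead of warning about an unfinished VDM.
         */
        return VDM_SVDM(vdo_hdr) && VDM_CMD(vdo_hdr) == CMD_DISC_IDENT;
}

int main(void)
{
        uint32_t hdr = 0xff008001;      /* SVID 0xFF00, structured, cmd 1 */

        printf("SVID %#x structured %u cmd %u -> discover pending: %d\n",
               VDM_SVID(hdr), VDM_SVDM(hdr), VDM_CMD(hdr),
               pending_discover_identity(hdr));
        return 0;
}
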
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index f632350f6dcb..8d1baf28df55 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1125,12 +1125,11 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con) return NULL; } -static int ucsi_register_port(struct ucsi *ucsi, int index) +static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con) { struct usb_power_delivery_desc desc = { ucsi->cap.pd_version}; struct usb_power_delivery_capabilities_desc pd_caps; struct usb_power_delivery_capabilities *pd_cap; - struct ucsi_connector *con = &ucsi->connector[index]; struct typec_capability *cap = &con->typec_cap; enum typec_accessory *accessory = cap->accessory; enum usb_role u_role = USB_ROLE_NONE; @@ -1151,7 +1150,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index) init_completion(&con->complete); mutex_init(&con->lock); INIT_LIST_HEAD(&con->partner_tasks); - con->num = index + 1; con->ucsi = ucsi; cap->fwnode = ucsi_find_fwnode(con); @@ -1328,8 +1326,8 @@ out_unlock: */ static int ucsi_init(struct ucsi *ucsi) { - struct ucsi_connector *con; - u64 command; + struct ucsi_connector *con, *connector; + u64 command, ntfy; int ret; int i; @@ -1341,8 +1339,8 @@ static int ucsi_init(struct ucsi *ucsi) } /* Enable basic notifications */ - ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR; - command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; + ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR; + command = UCSI_SET_NOTIFICATION_ENABLE | ntfy; ret = ucsi_send_command(ucsi, command, NULL, 0); if (ret < 0) goto err_reset; @@ -1359,31 +1357,33 @@ static int ucsi_init(struct ucsi *ucsi) } /* Allocate the connectors. Released in ucsi_unregister() */ - ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1, - sizeof(*ucsi->connector), GFP_KERNEL); - if (!ucsi->connector) { + connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL); + if (!connector) { ret = -ENOMEM; goto err_reset; } /* Register all connectors */ for (i = 0; i < ucsi->cap.num_connectors; i++) { - ret = ucsi_register_port(ucsi, i); + connector[i].num = i + 1; + ret = ucsi_register_port(ucsi, &connector[i]); if (ret) goto err_unregister; } /* Enable all notifications */ - ucsi->ntfy = UCSI_ENABLE_NTFY_ALL; - command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; + ntfy = UCSI_ENABLE_NTFY_ALL; + command = UCSI_SET_NOTIFICATION_ENABLE | ntfy; ret = ucsi_send_command(ucsi, command, NULL, 0); if (ret < 0) goto err_unregister; + ucsi->connector = connector; + ucsi->ntfy = ntfy; return 0; err_unregister: - for (con = ucsi->connector; con->port; con++) { + for (con = connector; con->port; con++) { ucsi_unregister_partner(con); ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON); ucsi_unregister_port_psy(con); @@ -1399,10 +1399,7 @@ err_unregister: typec_unregister_port(con->port); con->port = NULL; } - - kfree(ucsi->connector); - ucsi->connector = NULL; - + kfree(connector); err_reset: memset(&ucsi->cap, 0, sizeof(ucsi->cap)); ucsi_reset_ppm(ucsi); diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index ce0c8ef80c04..62206a6b8ea7 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, if (ret) goto out_clear_bit; - if (!wait_for_completion_timeout(&ua->complete, HZ)) + if (!wait_for_completion_timeout(&ua->complete, 5 * HZ)) ret = 
-ETIMEDOUT; out_clear_bit: diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 058fbe28107e..25fc4120b618 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -96,6 +96,7 @@ struct mlx5_vdpa_dev { struct mlx5_control_vq cvq; struct workqueue_struct *wq; unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS]; + bool suspended; }; int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 3a0e721aef05..520646ae7fa0 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -2438,7 +2438,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, if (err) goto err_mr; - if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) + if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) goto err_mr; restore_channels_info(ndev); @@ -2606,6 +2606,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev) clear_vqs_ready(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; + ndev->mvdev.suspended = false; ndev->cur_num_vqs = 0; ndev->mvdev.cvq.received_desc = 0; ndev->mvdev.cvq.completed_desc = 0; @@ -2852,6 +2853,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) struct mlx5_vdpa_virtqueue *mvq; int i; + mlx5_vdpa_info(mvdev, "suspending device\n"); + down_write(&ndev->reslock); ndev->nb_registered = false; mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); @@ -2861,6 +2864,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) suspend_vq(ndev, mvq); } mlx5_vdpa_cvq_suspend(mvdev); + mvdev->suspended = true; up_write(&ndev->reslock); return 0; } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 6a0a65814626..eea23c630f7c 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -68,6 +68,17 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) (uintptr_t)vq->device_addr); vq->vring.last_avail_idx = last_avail_idx; + + /* + * Since vdpa_sim does not support receive inflight descriptors as a + * destination of a migration, let's set both avail_idx and used_idx + * the same at vq start. This is how vhost-user works in a + * VHOST_SET_VRING_BASE call. + * + * Although the simple fix is to set last_used_idx at + * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready. 
+ */ + vq->vring.last_used_idx = last_avail_idx; vq->vring.notify = vdpasim_vq_notify; } diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 8fe267ca3e76..281287fae89f 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev) struct virtio_pci_modern_device *mdev = NULL; mdev = vp_vdpa_mgtdev->mdev; - vp_modern_remove(mdev); vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev); + vp_modern_remove(mdev); kfree(vp_vdpa_mgtdev->mgtdev.id_table); kfree(mdev); kfree(vp_vdpa_mgtdev); diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index e897537a9e8a..d95fd382814c 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -442,16 +442,10 @@ static long mlx5vf_precopy_ioctl(struct file *filp, unsigned int cmd, if (migf->pre_copy_initial_bytes > *pos) { info.initial_bytes = migf->pre_copy_initial_bytes - *pos; } else { - buf = mlx5vf_get_data_buff_from_pos(migf, *pos, &end_of_data); - if (buf) { - info.dirty_bytes = buf->start_pos + buf->length - *pos; - } else { - if (!end_of_data) { - ret = -EINVAL; - goto err_migf_unlock; - } - info.dirty_bytes = inc_length; - } + info.dirty_bytes = migf->max_pos - *pos; + if (!info.dirty_bytes) + end_of_data = true; + info.dirty_bytes += inc_length; } if (!end_of_data || !inc_length) { diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index dc12dbd5b43b..7be9d9d8f01c 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1169,6 +1169,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v) err_attach: iommu_domain_free(v->domain); + v->domain = NULL; return ret; } @@ -1213,6 +1214,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v) vhost_vdpa_remove_as(v, asid); } + vhost_vdpa_free_domain(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); } @@ -1285,7 +1287,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) vhost_vdpa_clean_irq(v); vhost_vdpa_reset(v); vhost_dev_stop(&v->vdev); - vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); vhost_vdpa_cleanup(v); mutex_unlock(&d->mutex); diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index f65c96d1394d..e45338227be6 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -854,7 +854,7 @@ static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev) board->caps = CLCD_CAP_ALL; board->check = clcdfb_check; board->decode = clcdfb_decode; - if (of_find_property(node, "memory-region", NULL)) { + if (of_property_present(node, "memory-region")) { board->setup = clcdfb_of_vram_setup; board->mmap = clcdfb_of_vram_mmap; board->remove = clcdfb_of_vram_remove; diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 81c315454428..b6b22fa4a8a0 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var, u32 pixclock; int screen_size, plane; + if (!var->pixclock) + return -EINVAL; + plane = fbdev->plane; /* Make sure that the mode respect all LCD controller and diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index 6403ae07970d..9cbadcd18b25 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -306,7 +306,7 @@ static int bw2_probe(struct platform_device *op) if (!par->regs) goto out_release_fb; - if (!of_find_property(dp, "width", NULL)) { + if 
(!of_property_present(dp, "width")) { err = bw2_do_default_mode(par, info, &linebytes); if (err) goto out_unmap_regs; diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c index bdcc3f6ab666..3a37fff4df36 100644 --- a/drivers/video/fbdev/cg3.c +++ b/drivers/video/fbdev/cg3.c @@ -393,7 +393,7 @@ static int cg3_probe(struct platform_device *op) cg3_blank(FB_BLANK_UNBLANK, info); - if (!of_find_property(dp, "width", NULL)) { + if (!of_property_present(dp, "width")) { err = cg3_do_default_mode(par); if (err) goto out_unmap_screen; diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index cc37ec3f8fc1..7799d52a651f 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) if (rc) return rc; - if (pci_enable_device(dp) < 0) { + rc = pci_enable_device(dp); + if (rc < 0) { dev_err(&dp->dev, "Cannot enable PCI device\n"); goto err_out; } - if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) + if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) { + rc = -ENODEV; goto err_disable; + } addr = pci_resource_start(dp, 0); - if (addr == 0) + if (addr == 0) { + rc = -ENODEV; goto err_disable; + } p = framebuffer_alloc(0, &dp->dev); if (p == NULL) { @@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) init_chips(p, addr); - if (register_framebuffer(p) < 0) { + rc = register_framebuffer(p); + if (rc < 0) { dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n"); goto err_unmap; } diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c index 45c75ff01eca..c8bfc608bd9c 100644 --- a/drivers/video/fbdev/clps711x-fb.c +++ b/drivers/video/fbdev/clps711x-fb.c @@ -238,8 +238,7 @@ static int clps711x_fb_probe(struct platform_device *pdev) info->fix.mmio_start = res->start; info->fix.mmio_len = resource_size(res); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - info->screen_base = devm_ioremap_resource(dev, res); + info->screen_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res); if (IS_ERR(info->screen_base)) { ret = PTR_ERR(info->screen_base); goto out_fb_release; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index aa5f059d0222..274f5d0fa247 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file) { + struct fb_deferred_io *fbdefio = info->fbdefio; + file->f_mapping->a_ops = &fb_deferred_io_aops; + fbdefio->open_count++; } EXPORT_SYMBOL_GPL(fb_deferred_io_open); -void fb_deferred_io_release(struct fb_info *info) +static void fb_deferred_io_lastclose(struct fb_info *info) { - struct fb_deferred_io *fbdefio = info->fbdefio; struct page *page; int i; - BUG_ON(!fbdefio); cancel_delayed_work_sync(&info->deferred_work); /* clear out the mapping that we setup */ @@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info) page->mapping = NULL; } } + +void fb_deferred_io_release(struct fb_info *info) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + + if (!--fbdefio->open_count) + fb_deferred_io_lastclose(info); +} EXPORT_SYMBOL_GPL(fb_deferred_io_release); void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; - fb_deferred_io_release(info); + fb_deferred_io_lastclose(info); kvfree(info->pagerefs); mutex_destroy(&fbdefio->lock); 
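
[Editor's note, before the geode/lxfb changes below: the fb_defio.c hunk above converts the deferred-I/O teardown to an open_count / "lastclose" scheme, so the page-mapping cleanup runs only when the final opener releases the framebuffer, while fb_deferred_io_cleanup() can still force it. The snippet below is a minimal user-space sketch of that pattern, not the fbdev implementation; the struct and function names are invented for illustration.]

#include <stdio.h>

struct defio_state {
        int open_count;
        int mappings_live;      /* stands in for the page->mapping setup */
};

static void lastclose(struct defio_state *s)
{
        /* Runs once: when the last opener goes away, or on forced cleanup. */
        s->mappings_live = 0;
        printf("lastclose: mappings cleared\n");
}

static void defio_open(struct defio_state *s)
{
        s->open_count++;
        s->mappings_live = 1;
}

static void defio_release(struct defio_state *s)
{
        /* Like fb_deferred_io_release(): only the final release tears down. */
        if (!--s->open_count)
                lastclose(s);
}

int main(void)
{
        struct defio_state s = { 0, 0 };

        defio_open(&s);
        defio_open(&s);
        defio_release(&s);      /* first close: nothing happens yet */
        defio_release(&s);      /* last close: triggers lastclose() */
        return 0;
}
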
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c index 8130e9eee2b4..556d8b1a9e06 100644 --- a/drivers/video/fbdev/geode/lxfb_core.c +++ b/drivers/video/fbdev/geode/lxfb_core.c @@ -235,6 +235,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size) static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { + if (!var->pixclock) + return -EINVAL; + if (var->xres > 1920 || var->yres > 1440) return -EINVAL; diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index 0a9e5067b201..a81095b2b1ea 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -1222,6 +1222,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var, dinfo = GET_DINFO(info); + if (!var->pixclock) + return -EINVAL; + /* update the pitch */ if (intelfbhw_validate_mode(dinfo, var) != 0) return -EINVAL; diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c index e60a276b4855..ea4ba3dfb96b 100644 --- a/drivers/video/fbdev/nvidia/nvidia.c +++ b/drivers/video/fbdev/nvidia/nvidia.c @@ -764,6 +764,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var, int pitch, err = 0; NVTRACE_ENTER(); + if (!var->pixclock) + return -EINVAL; var->transp.offset = 0; var->transp.length = 0; diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c index f7ad6bc9d02d..b97d251d894b 100644 --- a/drivers/video/fbdev/offb.c +++ b/drivers/video/fbdev/offb.c @@ -549,10 +549,10 @@ static void offb_init_nodriver(struct platform_device *parent, struct device_nod int foreign_endian = 0; #ifdef __BIG_ENDIAN - if (of_get_property(dp, "little-endian", NULL)) + if (of_property_read_bool(dp, "little-endian")) foreign_endian = FBINFO_FOREIGN_ENDIAN; #else - if (of_get_property(dp, "big-endian", NULL)) + if (of_property_read_bool(dp, "big-endian")) foreign_endian = FBINFO_FOREIGN_ENDIAN; #endif diff --git a/drivers/video/fbdev/omap/Makefile b/drivers/video/fbdev/omap/Makefile index 504edb9c09dd..6d5082c76919 100644 --- a/drivers/video/fbdev/omap/Makefile +++ b/drivers/video/fbdev/omap/Makefile @@ -18,7 +18,6 @@ objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o -lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c deleted file mode 100644 index 8168ba0d47fd..000000000000 --- a/drivers/video/fbdev/omap/lcd_osk.c +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * LCD panel support for the TI OMAP OSK board - * - * Copyright (C) 2004 Nokia Corporation - * Author: Imre Deak <imre.deak@nokia.com> - * Adapted for OSK by <dirk.behme@de.bosch.com> - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/gpio.h> - -#include <linux/soc/ti/omap1-io.h> -#include <linux/soc/ti/omap1-mux.h> - -#include "omapfb.h" - -static int osk_panel_enable(struct lcd_panel *panel) -{ - /* configure PWL pin */ - omap_cfg_reg(PWL); - - /* Enable PWL unit */ - omap_writeb(0x01, OMAP_PWL_CLK_ENABLE); - - /* Set PWL level */ - omap_writeb(0xFF, OMAP_PWL_ENABLE); - - /* set GPIO2 high (lcd power enabled) */ - gpio_set_value(2, 1); - - return 0; -} - -static void osk_panel_disable(struct lcd_panel *panel) -{ - /* Set PWL level to zero */ - omap_writeb(0x00, OMAP_PWL_ENABLE); - - /* Disable 
PWL unit */ - omap_writeb(0x00, OMAP_PWL_CLK_ENABLE); - - /* set GPIO2 low */ - gpio_set_value(2, 0); -} - -static struct lcd_panel osk_panel = { - .name = "osk", - .config = OMAP_LCDC_PANEL_TFT, - - .bpp = 16, - .data_lines = 16, - .x_res = 240, - .y_res = 320, - .pixel_clock = 12500, - .hsw = 40, - .hfp = 40, - .hbp = 72, - .vsw = 1, - .vfp = 1, - .vbp = 0, - .pcd = 12, - - .enable = osk_panel_enable, - .disable = osk_panel_disable, -}; - -static int osk_panel_probe(struct platform_device *pdev) -{ - omapfb_register_panel(&osk_panel); - return 0; -} - -static struct platform_driver osk_panel_driver = { - .probe = osk_panel_probe, - .driver = { - .name = "lcd_osk", - }, -}; - -module_platform_driver(osk_panel_driver); - -MODULE_AUTHOR("Imre Deak"); -MODULE_DESCRIPTION("LCD panel support for the TI OMAP OSK board"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 1f3df2055ff0..18736079843d 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -544,19 +544,25 @@ static int set_fb_var(struct fb_info *fbi, var->yoffset = var->yres_virtual - var->yres; if (plane->color_mode == OMAPFB_COLOR_RGB444) { - var->red.offset = 8; var->red.length = 4; - var->red.msb_right = 0; - var->green.offset = 4; var->green.length = 4; - var->green.msb_right = 0; - var->blue.offset = 0; var->blue.length = 4; - var->blue.msb_right = 0; + var->red.offset = 8; + var->red.length = 4; + var->red.msb_right = 0; + var->green.offset = 4; + var->green.length = 4; + var->green.msb_right = 0; + var->blue.offset = 0; + var->blue.length = 4; + var->blue.msb_right = 0; } else { - var->red.offset = 11; var->red.length = 5; - var->red.msb_right = 0; - var->green.offset = 5; var->green.length = 6; - var->green.msb_right = 0; - var->blue.offset = 0; var->blue.length = 5; - var->blue.msb_right = 0; + var->red.offset = 11; + var->red.length = 5; + var->red.msb_right = 0; + var->green.offset = 5; + var->green.length = 6; + var->green.msb_right = 0; + var->blue.offset = 0; + var->blue.length = 5; + var->blue.msb_right = 0; } var->height = -1; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c index 0ae0cab252d3..09f719af0d0c 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c @@ -192,7 +192,7 @@ static int __init omapdss_boot_init(void) omapdss_walk_device(dss, true); for_each_available_child_of_node(dss, child) { - if (!of_find_property(child, "compatible", NULL)) + if (!of_property_present(child, "compatible")) continue; omapdss_walk_device(child, true); diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index c3cd1e1cc01b..d16729215423 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -599,8 +599,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) priv->misc_dev.fops = &pxa3xx_gcu_miscdev_fops; /* handle IO resources */ - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->mmio_base = devm_ioremap_resource(dev, r); + priv->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, &r); if (IS_ERR(priv->mmio_base)) return PTR_ERR(priv->mmio_base); diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index f743bfbde2a6..1f3cbe723def 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1737,10 +1737,10 @@ static int 
sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head, #if defined(CONFIG_OF) #ifdef __BIG_ENDIAN - if (of_get_property(info->dev->parent->of_node, "little-endian", NULL)) + if (of_property_read_bool(info->dev->parent->of_node, "little-endian")) fb->flags |= FBINFO_FOREIGN_ENDIAN; #else - if (of_get_property(info->dev->parent->of_node, "big-endian", NULL)) + if (of_property_read_bool(info->dev->parent->of_node, "big-endian")) fb->flags |= FBINFO_FOREIGN_ENDIAN; #endif #endif diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 3feb6e40d56d..ef8a4c5fc687 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -922,6 +922,28 @@ SETUP_HCRX(struct stifb_info *fb) /* ------------------- driver specific functions --------------------------- */ static int +stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + struct stifb_info *fb = container_of(info, struct stifb_info, info); + + if (var->xres != fb->info.var.xres || + var->yres != fb->info.var.yres || + var->bits_per_pixel != fb->info.var.bits_per_pixel) + return -EINVAL; + + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + var->xoffset = 0; + var->yoffset = 0; + var->grayscale = fb->info.var.grayscale; + var->red.length = fb->info.var.red.length; + var->green.length = fb->info.var.green.length; + var->blue.length = fb->info.var.blue.length; + + return 0; +} + +static int stifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { @@ -1145,6 +1167,7 @@ stifb_init_display(struct stifb_info *fb) static const struct fb_ops stifb_ops = { .owner = THIS_MODULE, + .fb_check_var = stifb_check_var, .fb_setcolreg = stifb_setcolreg, .fb_blank = stifb_blank, .fb_fillrect = stifb_fillrect, @@ -1164,6 +1187,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) struct stifb_info *fb; struct fb_info *info; unsigned long sti_rom_address; + char modestr[32]; char *dev_name; int bpp, xres, yres; @@ -1342,6 +1366,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref) info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->pseudo_palette = &fb->pseudo_palette; + scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp); + fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp); + /* This has to be done !!! 
*/ if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0)) goto out_err1; diff --git a/drivers/video/fbdev/tcx.c b/drivers/video/fbdev/tcx.c index 01d87f53324d..f2eaf6e7fff6 100644 --- a/drivers/video/fbdev/tcx.c +++ b/drivers/video/fbdev/tcx.c @@ -379,8 +379,7 @@ static int tcx_probe(struct platform_device *op) spin_lock_init(&par->lock); - par->lowdepth = - (of_find_property(dp, "tcx-8-bit", NULL) != NULL); + par->lowdepth = of_property_read_bool(dp, "tcx-8-bit"); sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c index 14d37c49633c..b44004880f0d 100644 --- a/drivers/video/fbdev/tgafb.c +++ b/drivers/video/fbdev/tgafb.c @@ -173,6 +173,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct tga_par *par = (struct tga_par *)info->par; + if (!var->pixclock) + return -EINVAL; + if (par->tga_type == TGA_TYPE_8PLANE) { if (var->bits_per_pixel != 8) return -EINVAL; diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c index 8f4d674fa0d0..96a6f7623e19 100644 --- a/drivers/video/fbdev/wm8505fb.c +++ b/drivers/video/fbdev/wm8505fb.c @@ -261,7 +261,6 @@ static const struct fb_ops wm8505fb_ops = { static int wm8505fb_probe(struct platform_device *pdev) { struct wm8505fb_info *fbi; - struct resource *res; struct display_timings *disp_timing; void *addr; int ret; @@ -299,8 +298,7 @@ static int wm8505fb_probe(struct platform_device *pdev) addr = addr + sizeof(struct wm8505fb_info); fbi->fb.pseudo_palette = addr; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - fbi->regbase = devm_ioremap_resource(&pdev->dev, res); + fbi->regbase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fbi->regbase)) return PTR_ERR(fbi->regbase); diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c index 1ac83900a21c..7911354827dc 100644 --- a/drivers/video/fbdev/xilinxfb.c +++ b/drivers/video/fbdev/xilinxfb.c @@ -273,8 +273,7 @@ static int xilinxfb_assign(struct platform_device *pdev, if (drvdata->flags & BUS_ACCESS_FLAG) { struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - drvdata->regs = devm_ioremap_resource(&pdev->dev, res); + drvdata->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(drvdata->regs)) return PTR_ERR(drvdata->regs); @@ -469,8 +468,7 @@ static int xilinxfb_of_probe(struct platform_device *pdev) pdata.yvirt = prop[1]; } - if (of_find_property(pdev->dev.of_node, "rotate-display", NULL)) - pdata.rotate_screen = 1; + pdata.rotate_screen = of_property_read_bool(pdev->dev.of_node, "rotate-display"); platform_set_drvdata(pdev, drvdata); return xilinxfb_assign(pdev, drvdata, &pdata); diff --git a/drivers/video/logo/pnmtologo.c b/drivers/video/logo/pnmtologo.c index 4718d7895f0b..ada5ef6e51b7 100644 --- a/drivers/video/logo/pnmtologo.c +++ b/drivers/video/logo/pnmtologo.c @@ -1,15 +1,9 @@ - +// SPDX-License-Identifier: GPL-2.0-only /* * Convert a logo in ASCII PNM format to C source suitable for inclusion in * the Linux kernel * * (C) Copyright 2001-2003 by Geert Uytterhoeven <geert@linux-m68k.org> - * - * -------------------------------------------------------------------------- - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of the Linux - * distribution for more details. 
*/ #include <ctype.h> @@ -34,37 +28,37 @@ static FILE *out; #define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */ static const char *logo_types[LINUX_LOGO_GRAY256+1] = { - [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO", - [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16", - [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224", - [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256" + [LINUX_LOGO_MONO] = "LINUX_LOGO_MONO", + [LINUX_LOGO_VGA16] = "LINUX_LOGO_VGA16", + [LINUX_LOGO_CLUT224] = "LINUX_LOGO_CLUT224", + [LINUX_LOGO_GRAY256] = "LINUX_LOGO_GRAY256" }; #define MAX_LINUX_LOGO_COLORS 224 struct color { - unsigned char red; - unsigned char green; - unsigned char blue; + unsigned char red; + unsigned char green; + unsigned char blue; }; static const struct color clut_vga16[16] = { - { 0x00, 0x00, 0x00 }, - { 0x00, 0x00, 0xaa }, - { 0x00, 0xaa, 0x00 }, - { 0x00, 0xaa, 0xaa }, - { 0xaa, 0x00, 0x00 }, - { 0xaa, 0x00, 0xaa }, - { 0xaa, 0x55, 0x00 }, - { 0xaa, 0xaa, 0xaa }, - { 0x55, 0x55, 0x55 }, - { 0x55, 0x55, 0xff }, - { 0x55, 0xff, 0x55 }, - { 0x55, 0xff, 0xff }, - { 0xff, 0x55, 0x55 }, - { 0xff, 0x55, 0xff }, - { 0xff, 0xff, 0x55 }, - { 0xff, 0xff, 0xff }, + { 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0xaa }, + { 0x00, 0xaa, 0x00 }, + { 0x00, 0xaa, 0xaa }, + { 0xaa, 0x00, 0x00 }, + { 0xaa, 0x00, 0xaa }, + { 0xaa, 0x55, 0x00 }, + { 0xaa, 0xaa, 0xaa }, + { 0x55, 0x55, 0x55 }, + { 0x55, 0x55, 0xff }, + { 0x55, 0xff, 0x55 }, + { 0x55, 0xff, 0xff }, + { 0xff, 0x55, 0x55 }, + { 0xff, 0x55, 0xff }, + { 0xff, 0xff, 0x55 }, + { 0xff, 0xff, 0xff }, }; @@ -77,438 +71,440 @@ static unsigned int logo_clutsize; static int is_plain_pbm = 0; static void die(const char *fmt, ...) - __attribute__ ((noreturn)) __attribute ((format (printf, 1, 2))); -static void usage(void) __attribute ((noreturn)); +__attribute__((noreturn)) __attribute((format (printf, 1, 2))); +static void usage(void) __attribute((noreturn)); static unsigned int get_number(FILE *fp) { - int c, val; - - /* Skip leading whitespace */ - do { - c = fgetc(fp); - if (c == EOF) - die("%s: end of file\n", filename); - if (c == '#') { - /* Ignore comments 'till end of line */ - do { + int c, val; + + /* Skip leading whitespace */ + do { c = fgetc(fp); if (c == EOF) - die("%s: end of file\n", filename); - } while (c != '\n'); + die("%s: end of file\n", filename); + if (c == '#') { + /* Ignore comments 'till end of line */ + do { + c = fgetc(fp); + if (c == EOF) + die("%s: end of file\n", filename); + } while (c != '\n'); + } + } while (isspace(c)); + + /* Parse decimal number */ + val = 0; + while (isdigit(c)) { + val = 10*val+c-'0'; + /* some PBM are 'broken'; GiMP for example exports a PBM without space + * between the digits. This is Ok cause we know a PBM can only have a '1' + * or a '0' for the digit. + */ + if (is_plain_pbm) + break; + c = fgetc(fp); + if (c == EOF) + die("%s: end of file\n", filename); } - } while (isspace(c)); - - /* Parse decimal number */ - val = 0; - while (isdigit(c)) { - val = 10*val+c-'0'; - /* some PBM are 'broken'; GiMP for example exports a PBM without space - * between the digits. This is Ok cause we know a PBM can only have a '1' - * or a '0' for the digit. 
*/ - if (is_plain_pbm) - break; - c = fgetc(fp); - if (c == EOF) - die("%s: end of file\n", filename); - } - return val; + return val; } static unsigned int get_number255(FILE *fp, unsigned int maxval) { - unsigned int val = get_number(fp); - return (255*val+maxval/2)/maxval; + unsigned int val = get_number(fp); + + return (255*val+maxval/2)/maxval; } static void read_image(void) { - FILE *fp; - unsigned int i, j; - int magic; - unsigned int maxval; - - /* open image file */ - fp = fopen(filename, "r"); - if (!fp) - die("Cannot open file %s: %s\n", filename, strerror(errno)); - - /* check file type and read file header */ - magic = fgetc(fp); - if (magic != 'P') - die("%s is not a PNM file\n", filename); - magic = fgetc(fp); - switch (magic) { + FILE *fp; + unsigned int i, j; + int magic; + unsigned int maxval; + + /* open image file */ + fp = fopen(filename, "r"); + if (!fp) + die("Cannot open file %s: %s\n", filename, strerror(errno)); + + /* check file type and read file header */ + magic = fgetc(fp); + if (magic != 'P') + die("%s is not a PNM file\n", filename); + magic = fgetc(fp); + switch (magic) { case '1': case '2': case '3': - /* Plain PBM/PGM/PPM */ - break; + /* Plain PBM/PGM/PPM */ + break; case '4': case '5': case '6': - /* Binary PBM/PGM/PPM */ - die("%s: Binary PNM is not supported\n" + /* Binary PBM/PGM/PPM */ + die("%s: Binary PNM is not supported\n" "Use pnmnoraw(1) to convert it to ASCII PNM\n", filename); default: - die("%s is not a PNM file\n", filename); - } - logo_width = get_number(fp); - logo_height = get_number(fp); - - /* allocate image data */ - logo_data = (struct color **)malloc(logo_height*sizeof(struct color *)); - if (!logo_data) - die("%s\n", strerror(errno)); - for (i = 0; i < logo_height; i++) { - logo_data[i] = malloc(logo_width*sizeof(struct color)); + die("%s is not a PNM file\n", filename); + } + logo_width = get_number(fp); + logo_height = get_number(fp); + + /* allocate image data */ + logo_data = (struct color **)malloc(logo_height*sizeof(struct color *)); + if (!logo_data) + die("%s\n", strerror(errno)); + for (i = 0; i < logo_height; i++) { + logo_data[i] = malloc(logo_width*sizeof(struct color)); if (!logo_data[i]) - die("%s\n", strerror(errno)); - } + die("%s\n", strerror(errno)); + } - /* read image data */ - switch (magic) { + /* read image data */ + switch (magic) { case '1': - /* Plain PBM */ - is_plain_pbm = 1; - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) - logo_data[i][j].red = logo_data[i][j].green = - logo_data[i][j].blue = 255*(1-get_number(fp)); - break; + /* Plain PBM */ + is_plain_pbm = 1; + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) + logo_data[i][j].red = logo_data[i][j].green = + logo_data[i][j].blue = 255*(1-get_number(fp)); + break; case '2': - /* Plain PGM */ - maxval = get_number(fp); - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) - logo_data[i][j].red = logo_data[i][j].green = - logo_data[i][j].blue = get_number255(fp, maxval); - break; + /* Plain PGM */ + maxval = get_number(fp); + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) + logo_data[i][j].red = logo_data[i][j].green = + logo_data[i][j].blue = get_number255(fp, maxval); + break; case '3': - /* Plain PPM */ - maxval = get_number(fp); - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) { - logo_data[i][j].red = get_number255(fp, maxval); - logo_data[i][j].green = get_number255(fp, maxval); - logo_data[i][j].blue = get_number255(fp, maxval); - } - break; 
- } + /* Plain PPM */ + maxval = get_number(fp); + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) { + logo_data[i][j].red = get_number255(fp, maxval); + logo_data[i][j].green = get_number255(fp, maxval); + logo_data[i][j].blue = get_number255(fp, maxval); + } + break; + } - /* close file */ - fclose(fp); + /* close file */ + fclose(fp); } static inline int is_black(struct color c) { - return c.red == 0 && c.green == 0 && c.blue == 0; + return c.red == 0 && c.green == 0 && c.blue == 0; } static inline int is_white(struct color c) { - return c.red == 255 && c.green == 255 && c.blue == 255; + return c.red == 255 && c.green == 255 && c.blue == 255; } static inline int is_gray(struct color c) { - return c.red == c.green && c.red == c.blue; + return c.red == c.green && c.red == c.blue; } static inline int is_equal(struct color c1, struct color c2) { - return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue; + return c1.red == c2.red && c1.green == c2.green && c1.blue == c2.blue; } static void write_header(void) { - /* open logo file */ - if (outputname) { - out = fopen(outputname, "w"); - if (!out) - die("Cannot create file %s: %s\n", outputname, strerror(errno)); - } else { - out = stdout; - } - - fputs("/*\n", out); - fputs(" * DO NOT EDIT THIS FILE!\n", out); - fputs(" *\n", out); - fprintf(out, " * It was automatically generated from %s\n", filename); - fputs(" *\n", out); - fprintf(out, " * Linux logo %s\n", logoname); - fputs(" */\n\n", out); - fputs("#include <linux/linux_logo.h>\n\n", out); - fprintf(out, "static unsigned char %s_data[] __initdata = {\n", - logoname); + /* open logo file */ + if (outputname) { + out = fopen(outputname, "w"); + if (!out) + die("Cannot create file %s: %s\n", outputname, strerror(errno)); + } else { + out = stdout; + } + + fputs("/*\n", out); + fputs(" * DO NOT EDIT THIS FILE!\n", out); + fputs(" *\n", out); + fprintf(out, " * It was automatically generated from %s\n", filename); + fputs(" *\n", out); + fprintf(out, " * Linux logo %s\n", logoname); + fputs(" */\n\n", out); + fputs("#include <linux/linux_logo.h>\n\n", out); + fprintf(out, "static unsigned char %s_data[] __initdata = {\n", + logoname); } static void write_footer(void) { - fputs("\n};\n\n", out); - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); - fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); - fprintf(out, "\t.width\t\t= %d,\n", logo_width); - fprintf(out, "\t.height\t\t= %d,\n", logo_height); - if (logo_type == LINUX_LOGO_CLUT224) { - fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize); - fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname); - } - fprintf(out, "\t.data\t\t= %s_data\n", logoname); - fputs("};\n\n", out); - - /* close logo file */ - if (outputname) - fclose(out); + fputs("\n};\n\n", out); + fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); + fprintf(out, "\t.width\t\t= %d,\n", logo_width); + fprintf(out, "\t.height\t\t= %d,\n", logo_height); + if (logo_type == LINUX_LOGO_CLUT224) { + fprintf(out, "\t.clutsize\t= %d,\n", logo_clutsize); + fprintf(out, "\t.clut\t\t= %s_clut,\n", logoname); + } + fprintf(out, "\t.data\t\t= %s_data\n", logoname); + fputs("};\n\n", out); + + /* close logo file */ + if (outputname) + fclose(out); } static int write_hex_cnt; static void write_hex(unsigned char byte) { - if (write_hex_cnt % 12) - fprintf(out, ", 0x%02x", byte); - else if (write_hex_cnt) - fprintf(out, ",\n\t0x%02x", byte); - else - 
fprintf(out, "\t0x%02x", byte); - write_hex_cnt++; + if (write_hex_cnt % 12) + fprintf(out, ", 0x%02x", byte); + else if (write_hex_cnt) + fprintf(out, ",\n\t0x%02x", byte); + else + fprintf(out, "\t0x%02x", byte); + write_hex_cnt++; } static void write_logo_mono(void) { - unsigned int i, j; - unsigned char val, bit; - - /* validate image */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) - if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j])) - die("Image must be monochrome\n"); - - /* write file header */ - write_header(); - - /* write logo data */ - for (i = 0; i < logo_height; i++) { - for (j = 0; j < logo_width;) { - for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1) - if (logo_data[i][j].red) - val |= bit; - write_hex(val); + unsigned int i, j; + unsigned char val, bit; + + /* validate image */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) + if (!is_black(logo_data[i][j]) && !is_white(logo_data[i][j])) + die("Image must be monochrome\n"); + + /* write file header */ + write_header(); + + /* write logo data */ + for (i = 0; i < logo_height; i++) { + for (j = 0; j < logo_width;) { + for (val = 0, bit = 0x80; bit && j < logo_width; j++, bit >>= 1) + if (logo_data[i][j].red) + val |= bit; + write_hex(val); + } } - } - /* write logo structure and file footer */ - write_footer(); + /* write logo structure and file footer */ + write_footer(); } static void write_logo_vga16(void) { - unsigned int i, j, k; - unsigned char val; - - /* validate image */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) { - for (k = 0; k < 16; k++) - if (is_equal(logo_data[i][j], clut_vga16[k])) - break; - if (k == 16) - die("Image must use the 16 console colors only\n" - "Use ppmquant(1) -map clut_vga16.ppm to reduce the number " - "of colors\n"); - } + unsigned int i, j, k; + unsigned char val; - /* write file header */ - write_header(); - - /* write logo data */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) { - for (k = 0; k < 16; k++) - if (is_equal(logo_data[i][j], clut_vga16[k])) - break; - val = k<<4; - if (++j < logo_width) { - for (k = 0; k < 16; k++) - if (is_equal(logo_data[i][j], clut_vga16[k])) - break; - val |= k; - } - write_hex(val); - } + /* validate image */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) { + for (k = 0; k < 16; k++) + if (is_equal(logo_data[i][j], clut_vga16[k])) + break; + if (k == 16) + die("Image must use the 16 console colors only\n" + "Use ppmquant(1) -map clut_vga16.ppm to reduce the number " + "of colors\n"); + } - /* write logo structure and file footer */ - write_footer(); + /* write file header */ + write_header(); + + /* write logo data */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) { + for (k = 0; k < 16; k++) + if (is_equal(logo_data[i][j], clut_vga16[k])) + break; + val = k<<4; + if (++j < logo_width) { + for (k = 0; k < 16; k++) + if (is_equal(logo_data[i][j], clut_vga16[k])) + break; + val |= k; + } + write_hex(val); + } + + /* write logo structure and file footer */ + write_footer(); } static void write_logo_clut224(void) { - unsigned int i, j, k; - - /* validate image */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) { - for (k = 0; k < logo_clutsize; k++) - if (is_equal(logo_data[i][j], logo_clut[k])) - break; - if (k == logo_clutsize) { - if (logo_clutsize == MAX_LINUX_LOGO_COLORS) - die("Image has more than %d colors\n" - "Use ppmquant(1) to reduce the number of 
colors\n", - MAX_LINUX_LOGO_COLORS); - logo_clut[logo_clutsize++] = logo_data[i][j]; - } - } + unsigned int i, j, k; - /* write file header */ - write_header(); + /* validate image */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) { + for (k = 0; k < logo_clutsize; k++) + if (is_equal(logo_data[i][j], logo_clut[k])) + break; + if (k == logo_clutsize) { + if (logo_clutsize == MAX_LINUX_LOGO_COLORS) + die("Image has more than %d colors\n" + "Use ppmquant(1) to reduce the number of colors\n", + MAX_LINUX_LOGO_COLORS); + logo_clut[logo_clutsize++] = logo_data[i][j]; + } + } - /* write logo data */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) { - for (k = 0; k < logo_clutsize; k++) - if (is_equal(logo_data[i][j], logo_clut[k])) - break; - write_hex(k+32); + /* write file header */ + write_header(); + + /* write logo data */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) { + for (k = 0; k < logo_clutsize; k++) + if (is_equal(logo_data[i][j], logo_clut[k])) + break; + write_hex(k+32); + } + fputs("\n};\n\n", out); + + /* write logo clut */ + fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", + logoname); + write_hex_cnt = 0; + for (i = 0; i < logo_clutsize; i++) { + write_hex(logo_clut[i].red); + write_hex(logo_clut[i].green); + write_hex(logo_clut[i].blue); } - fputs("\n};\n\n", out); - - /* write logo clut */ - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", - logoname); - write_hex_cnt = 0; - for (i = 0; i < logo_clutsize; i++) { - write_hex(logo_clut[i].red); - write_hex(logo_clut[i].green); - write_hex(logo_clut[i].blue); - } - - /* write logo structure and file footer */ - write_footer(); + + /* write logo structure and file footer */ + write_footer(); } static void write_logo_gray256(void) { - unsigned int i, j; + unsigned int i, j; - /* validate image */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) - if (!is_gray(logo_data[i][j])) - die("Image must be grayscale\n"); + /* validate image */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) + if (!is_gray(logo_data[i][j])) + die("Image must be grayscale\n"); - /* write file header */ - write_header(); + /* write file header */ + write_header(); - /* write logo data */ - for (i = 0; i < logo_height; i++) - for (j = 0; j < logo_width; j++) - write_hex(logo_data[i][j].red); + /* write logo data */ + for (i = 0; i < logo_height; i++) + for (j = 0; j < logo_width; j++) + write_hex(logo_data[i][j].red); - /* write logo structure and file footer */ - write_footer(); + /* write logo structure and file footer */ + write_footer(); } static void die(const char *fmt, ...) 
{ - va_list ap; + va_list ap; - va_start(ap, fmt); - vfprintf(stderr, fmt, ap); - va_end(ap); + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); - exit(1); + exit(1); } static void usage(void) { - die("\n" + die("\n" "Usage: %s [options] <filename>\n" "\n" "Valid options:\n" - " -h : display this usage information\n" - " -n <name> : specify logo name (default: linux_logo)\n" - " -o <output> : output to file <output> instead of stdout\n" - " -t <type> : specify logo type, one of\n" - " mono : monochrome black/white\n" - " vga16 : 16 colors VGA text palette\n" - " clut224 : 224 colors (default)\n" - " gray256 : 256 levels grayscale\n" + " -h : display this usage information\n" + " -n <name> : specify logo name (default: linux_logo)\n" + " -o <output> : output to file <output> instead of stdout\n" + " -t <type> : specify logo type, one of\n" + " mono : monochrome black/white\n" + " vga16 : 16 colors VGA text palette\n" + " clut224 : 224 colors (default)\n" + " gray256 : 256 levels grayscale\n" "\n", programname); } int main(int argc, char *argv[]) { - int opt; + int opt; - programname = argv[0]; + programname = argv[0]; - opterr = 0; - while (1) { - opt = getopt(argc, argv, "hn:o:t:"); - if (opt == -1) - break; + opterr = 0; + while (1) { + opt = getopt(argc, argv, "hn:o:t:"); + if (opt == -1) + break; - switch (opt) { - case 'h': - usage(); - break; + switch (opt) { + case 'h': + usage(); + break; - case 'n': - logoname = optarg; - break; + case 'n': + logoname = optarg; + break; - case 'o': - outputname = optarg; - break; + case 'o': + outputname = optarg; + break; - case 't': - if (!strcmp(optarg, "mono")) - logo_type = LINUX_LOGO_MONO; - else if (!strcmp(optarg, "vga16")) - logo_type = LINUX_LOGO_VGA16; - else if (!strcmp(optarg, "clut224")) - logo_type = LINUX_LOGO_CLUT224; - else if (!strcmp(optarg, "gray256")) - logo_type = LINUX_LOGO_GRAY256; - else - usage(); - break; + case 't': + if (!strcmp(optarg, "mono")) + logo_type = LINUX_LOGO_MONO; + else if (!strcmp(optarg, "vga16")) + logo_type = LINUX_LOGO_VGA16; + else if (!strcmp(optarg, "clut224")) + logo_type = LINUX_LOGO_CLUT224; + else if (!strcmp(optarg, "gray256")) + logo_type = LINUX_LOGO_GRAY256; + else + usage(); + break; - default: - usage(); - break; + default: + usage(); + break; + } } - } - if (optind != argc-1) - usage(); + if (optind != argc-1) + usage(); - filename = argv[optind]; + filename = argv[optind]; - read_image(); - switch (logo_type) { + read_image(); + switch (logo_type) { case LINUX_LOGO_MONO: - write_logo_mono(); - break; + write_logo_mono(); + break; case LINUX_LOGO_VGA16: - write_logo_vga16(); - break; + write_logo_vga16(); + break; case LINUX_LOGO_CLUT224: - write_logo_clut224(); - break; + write_logo_clut224(); + break; case LINUX_LOGO_GRAY256: - write_logo_gray256(); - break; - } - exit(0); + write_logo_gray256(); + break; + } + exit(0); } diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 7b4e9009f335..46f1a8d558b0 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -31,6 +31,9 @@ #define AAD_LEN 48 #define MSG_HDR_VER 1 +#define SNP_REQ_MAX_RETRY_DURATION (60*HZ) +#define SNP_REQ_RETRY_DELAY (2*HZ) + struct snp_guest_crypto { struct crypto_aead *tfm; u8 *iv, *authtag; @@ -318,26 +321,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 return __enc_payload(snp_dev, req, payload, sz); } -static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 
exit_code, int msg_ver, - u8 type, void *req_buf, size_t req_sz, void *resp_buf, - u32 resp_sz, __u64 *fw_err) +static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err) { - unsigned long err; - u64 seqno; + unsigned long err = 0xff, override_err = 0; + unsigned long req_start = jiffies; + unsigned int override_npages = 0; int rc; - /* Get message sequence and verify that its a non-zero */ - seqno = snp_get_msg_seqno(snp_dev); - if (!seqno) - return -EIO; - - memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); - - /* Encrypt the userspace provided payload */ - rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz); - if (rc) - return rc; - +retry_request: /* * Call firmware to process the request. In this function the encrypted * message enters shared memory with the host. So after this call the @@ -345,18 +336,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in * prevent reuse of the IV. */ rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + switch (rc) { + case -ENOSPC: + /* + * If the extended guest request fails due to having too + * small of a certificate data buffer, retry the same + * guest request without the extended data request in + * order to increment the sequence number and thus avoid + * IV reuse. + */ + override_npages = snp_dev->input.data_npages; + exit_code = SVM_VMGEXIT_GUEST_REQUEST; - /* - * If the extended guest request fails due to having too small of a - * certificate data buffer, retry the same guest request without the - * extended data request in order to increment the sequence number - * and thus avoid IV reuse. - */ - if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST && - err == SNP_GUEST_REQ_INVALID_LEN) { - const unsigned int certs_npages = snp_dev->input.data_npages; - - exit_code = SVM_VMGEXIT_GUEST_REQUEST; + /* + * Override the error to inform callers the given extended + * request buffer size was too small and give the caller the + * required buffer size. + */ + override_err = SNP_GUEST_REQ_INVALID_LEN; /* * If this call to the firmware succeeds, the sequence number can @@ -366,15 +363,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in * of the VMPCK and the error code being propagated back to the * user as an ioctl() return code. */ - rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err); + goto retry_request; - /* - * Override the error to inform callers the given extended - * request buffer size was too small and give the caller the - * required buffer size. - */ - err = SNP_GUEST_REQ_INVALID_LEN; - snp_dev->input.data_npages = certs_npages; + /* + * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been + * throttled. Retry in the driver to avoid returning and reusing the + * message sequence number on a different message. + */ + case -EAGAIN: + if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) { + rc = -ETIMEDOUT; + break; + } + schedule_timeout_killable(SNP_REQ_RETRY_DELAY); + goto retry_request; } /* @@ -386,7 +388,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in snp_inc_msg_seqno(snp_dev); if (fw_err) - *fw_err = err; + *fw_err = override_err ?: err; + + if (override_npages) + snp_dev->input.data_npages = override_npages; /* * If an extended guest request was issued and the supplied certificate @@ -394,29 +399,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in * prevent IV reuse. 
If the standard request was successful, return -EIO
 	 * back to the caller as would have originally been returned.
 	 */
-	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+		return -EIO;
+
+	return rc;
+}
+
+static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+				u32 resp_sz, __u64 *fw_err)
+{
+	u64 seqno;
+	int rc;
+
+	/* Get message sequence and verify that its a non-zero */
+	seqno = snp_get_msg_seqno(snp_dev);
+	if (!seqno)
 		return -EIO;
 
+	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+	/* Encrypt the userspace provided payload */
+	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+	if (rc)
+		return rc;
+
+	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
-			  rc, *fw_err);
-		goto disable_vmpck;
+		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+			return rc;
+
+		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected unexpected decode failure from ASP. rc: %d\n",
-			  rc);
-		goto disable_vmpck;
+		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	return 0;
-
-disable_vmpck:
-	snp_disable_vmpck(snp_dev);
-	return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 	void __iomem *mapping;
 	int ret;
 
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return -ENODEV;
+
 	if (!dev->platform_data)
 		return -ENODEV;
 
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index c6c73a33c44d..b799bc759c15 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -64,7 +64,7 @@ static int xensyms_next_sym(struct xensyms *xs)
 
 static void *xensyms_start(struct seq_file *m, loff_t *pos)
 {
-	struct xensyms *xs = (struct xensyms *)m->private;
+	struct xensyms *xs = m->private;
 
 	xs->op.u.symdata.symnum = *pos;
 
@@ -76,7 +76,7 @@ static void *xensyms_start(struct seq_file *m, loff_t *pos)
 
 static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	struct xensyms *xs = (struct xensyms *)m->private;
+	struct xensyms *xs = m->private;
 
 	xs->op.u.symdata.symnum = ++(*pos);
 
@@ -88,7 +88,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
 
 static int xensyms_show(struct seq_file *m, void *p)
 {
-	struct xensyms *xs = (struct xensyms *)m->private;
+	struct xensyms *xs = m->private;
 	struct xenpf_symdata *symdata = &xs->op.u.symdata;
 
 	seq_printf(m, "%016llx %c %s\n", symdata->address,
@@ -120,7 +120,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
 		return ret;
 
 	m = file->private_data;
-	xs = (struct xensyms *)m->private;
+	xs = m->private;
 
 	xs->namelen = XEN_KSYM_NAME_LEN + 1;
 	xs->name = kzalloc(xs->namelen, GFP_KERNEL);
@@ -138,7 +138,7 @@ static int xensyms_open(struct inode *inode, struct file *file)
 static int xensyms_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = file->private_data;
-	struct xensyms *xs = (struct xensyms *)m->private;
+	struct xensyms *xs = m->private;
 
 	kfree(xs->name);
 	return seq_release_private(inode, file);
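The pxa3xx-gcu, wm8505fb and xilinxfb hunks above replace the open-coded platform_get_resource() plus devm_ioremap_resource() pair with devm_platform_ioremap_resource() or devm_platform_get_and_ioremap_resource(). The following is only a minimal sketch of that probe pattern; the "demo-mmio" driver name, the demo_mmio structure and its fields are invented for illustration and are not taken from any driver in the diff.

// Sketch only: "demo_mmio"/"demo-mmio" are placeholder names, not part of the patch.
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

struct demo_mmio {
	void __iomem *regs;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_mmio *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * Old style, as removed in the hunks above:
	 *
	 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	 *
	 * The helper below looks up and maps the resource in one call, hands
	 * the resource back through the third argument for drivers that still
	 * need its start/size, and keeps the same IS_ERR() error convention.
	 * Drivers that do not need the resource can use
	 * devm_platform_ioremap_resource(pdev, 0) instead.
	 */
	priv->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	dev_info(&pdev->dev, "mapped %pR\n", res);
	platform_set_drvdata(pdev, priv);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name = "demo-mmio",
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");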
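Several of the fbdev hunks (omapdss-boot-init, sm501fb, tcx, xilinxfb) turn of_find_property()/of_get_property() NULL checks into of_property_present() and of_property_read_bool(). A small sketch of the presence-test idiom follows; the function name and the way the results are consumed are made up, only the property names come from the hunks above.

// Sketch only: demo_parse_node() is a hypothetical consumer of the DT properties.
#include <linux/of.h>
#include <linux/types.h>

void demo_parse_node(struct device_node *np, bool *rotate, bool *has_compat)
{
	/*
	 * The old calls return a property pointer that the callers above only
	 * compared against NULL.  For pure presence tests the boolean helpers
	 * express the intent directly and avoid carrying the pointer around.
	 */
	*rotate = of_property_read_bool(np, "rotate-display");
	*has_compat = of_property_present(np, "compatible");
}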
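The stifb change adds an fb_check_var hook that pins the device to the single mode it was initialised with, and the tgafb change rejects a zero pixclock before it can feed a division. A rough combined sketch of that fixed-mode validation, assuming a driver with exactly one supported mode; demo_check_var() and demo_fb_ops are hypothetical and simplified (the real stifb hook also preserves grayscale and the RGB field lengths).

// Sketch only: a fixed-mode ->fb_check_var in the spirit of the stifb/tgafb changes.
#include <linux/module.h>
#include <linux/fb.h>

static int demo_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	/* Divide-by-zero guard, as added to tgafb_check_var() above. */
	if (!var->pixclock)
		return -EINVAL;

	/* Only the mode the hardware was set up with is accepted. */
	if (var->xres != info->var.xres ||
	    var->yres != info->var.yres ||
	    var->bits_per_pixel != info->var.bits_per_pixel)
		return -EINVAL;

	/* Normalise the virtual size and panning offsets. */
	var->xres_virtual = var->xres;
	var->yres_virtual = var->yres;
	var->xoffset = 0;
	var->yoffset = 0;

	return 0;
}

static const struct fb_ops demo_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= demo_check_var,
};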
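pnmtologo.c above is only reindented, but its get_number() helper is the heart of the tool: it skips whitespace and '#' comments in an ASCII PNM stream and then parses a decimal token. Below is a simplified standalone version that reads a plain PGM/PPM header from stdin; it drops the tool's plain-PBM single-digit special case and returns -1 on EOF instead of exiting, so it is a sketch of the parsing approach rather than the tool itself.

/* Standalone, simplified ASCII-PNM token reader in the style of pnmtologo.c. */
#include <ctype.h>
#include <stdio.h>

static int pnm_get_number(FILE *fp)
{
	int c, val;

	/* Skip leading whitespace and '#' comments (which run to end of line). */
	do {
		c = fgetc(fp);
		if (c == EOF)
			return -1;
		if (c == '#') {
			do {
				c = fgetc(fp);
				if (c == EOF)
					return -1;
			} while (c != '\n');
		}
	} while (isspace(c));

	/* Parse a decimal number; the terminating character is discarded. */
	for (val = 0; c != EOF && isdigit(c); c = fgetc(fp))
		val = 10 * val + (c - '0');

	return val;
}

int main(void)
{
	int width, height, maxval;

	/* Consume the "P2"/"P3" magic, then read the header numbers. */
	if (fgetc(stdin) != 'P' || fgetc(stdin) == EOF)
		return 1;

	width = pnm_get_number(stdin);
	height = pnm_get_number(stdin);
	maxval = pnm_get_number(stdin);
	printf("%dx%d, maxval %d\n", width, height, maxval);
	return 0;
}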
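The sev-guest change folds the firmware call into __handle_guest_request() so that throttling (-EAGAIN) and an undersized extended-request buffer (-ENOSPC) are retried with the same encrypted message. That keeps the message sequence number, and with it the AES-GCM IV, from being reused, while capping the retries at SNP_REQ_MAX_RETRY_DURATION with SNP_REQ_RETRY_DELAY between attempts. The sketch below shows only the bounded-retry shape of that loop; demo_issue_with_retry(), its callback and the DEMO_* constants are placeholders, not the driver's actual interface, and it does not model the -ENOSPC fallback from SVM_VMGEXIT_EXT_GUEST_REQUEST to SVM_VMGEXIT_GUEST_REQUEST that the real code performs.

// Sketch only: bounded retry for a throttled request, kernel-style.
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>

#define DEMO_MAX_RETRY_DURATION	(60 * HZ)	/* placeholder, mirrors SNP_REQ_MAX_RETRY_DURATION */
#define DEMO_RETRY_DELAY	(2 * HZ)	/* placeholder, mirrors SNP_REQ_RETRY_DELAY */

int demo_issue_with_retry(int (*issue_request)(void *ctx), void *ctx)
{
	unsigned long start = jiffies;
	int rc;

	for (;;) {
		rc = issue_request(ctx);
		if (rc != -EAGAIN)
			return rc;

		/* Throttled by the host: back off, but never past the deadline. */
		if (time_after(jiffies, start + DEMO_MAX_RETRY_DURATION))
			return -ETIMEDOUT;

		schedule_timeout_killable(DEMO_RETRY_DELAY);
	}
}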