Diffstat (limited to 'drivers/acpi/ec.c')
 -rw-r--r--   drivers/acpi/ec.c   757
1 file changed, 481 insertions, 276 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e629e891d1bb..59b3d50ff01e 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -23,8 +23,10 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/list.h> +#include <linux/printk.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/string.h> #include <linux/suspend.h> #include <linux/acpi.h> #include <linux/dmi.h> @@ -92,10 +94,9 @@ enum ec_command { enum { EC_FLAGS_QUERY_ENABLED, /* Query is enabled */ - EC_FLAGS_QUERY_PENDING, /* Query is pending */ - EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */ EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */ + EC_FLAGS_EC_REG_CALLED, /* OpReg ACPI _REG method called */ EC_FLAGS_QUERY_METHODS_INSTALLED, /* _Qxx handlers installed */ EC_FLAGS_STARTED, /* Driver is started */ EC_FLAGS_STOPPED, /* Driver is stopped */ @@ -133,7 +134,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8; module_param(ec_storm_threshold, uint, 0644); MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); -static bool ec_freeze_events __read_mostly = false; +static bool ec_freeze_events __read_mostly; module_param(ec_freeze_events, bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); @@ -166,23 +167,22 @@ struct acpi_ec_query { struct transaction transaction; struct work_struct work; struct acpi_ec_query_handler *handler; + struct acpi_ec *ec; }; -static int acpi_ec_query(struct acpi_ec *ec, u8 *data); +static int acpi_ec_submit_query(struct acpi_ec *ec); static void advance_transaction(struct acpi_ec *ec, bool interrupt); static void acpi_ec_event_handler(struct work_struct *work); -static void acpi_ec_event_processor(struct work_struct *work); struct acpi_ec *first_ec; EXPORT_SYMBOL(first_ec); static struct acpi_ec *boot_ec; -static bool boot_ec_is_ecdt = false; +static bool boot_ec_is_ecdt; static struct workqueue_struct *ec_wq; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ -static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ static int EC_FLAGS_TRUST_DSDT_GPE; /* Needs DSDT GPE as correction setting */ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ @@ -443,24 +443,50 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec) return true; } -static void acpi_ec_submit_query(struct acpi_ec *ec) +static void acpi_ec_submit_event(struct acpi_ec *ec) { + /* + * It is safe to mask the events here, because acpi_ec_close_event() + * will run at least once after this. + */ acpi_ec_mask_events(ec); if (!acpi_ec_event_enabled(ec)) return; - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { - ec_dbg_evt("Command(%s) submitted/blocked", - acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); - ec->nr_pending_queries++; - queue_work(ec_wq, &ec->work); - } + + if (ec->event_state != EC_EVENT_READY) + return; + + ec_dbg_evt("Command(%s) submitted/blocked", + acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + + ec->event_state = EC_EVENT_IN_PROGRESS; + /* + * If events_to_process is greater than 0 at this point, the while () + * loop in acpi_ec_event_handler() is still running and incrementing + * events_to_process will cause it to invoke acpi_ec_submit_query() once + * more, so it is not necessary to queue up the event work to start the + * same loop again. 
+ */ + if (ec->events_to_process++ > 0) + return; + + ec->events_in_progress++; + queue_work(ec_wq, &ec->work); +} + +static void acpi_ec_complete_event(struct acpi_ec *ec) +{ + if (ec->event_state == EC_EVENT_IN_PROGRESS) + ec->event_state = EC_EVENT_COMPLETE; } -static void acpi_ec_complete_query(struct acpi_ec *ec) +static void acpi_ec_close_event(struct acpi_ec *ec) { - if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) + if (ec->event_state != EC_EVENT_READY) ec_dbg_evt("Command(%s) unblocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); + + ec->event_state = EC_EVENT_READY; acpi_ec_unmask_events(ec); } @@ -487,12 +513,10 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec) */ static void acpi_ec_clear(struct acpi_ec *ec) { - int i, status; - u8 value = 0; + int i; for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { - status = acpi_ec_query(ec, &value); - if (status || !value) + if (acpi_ec_submit_query(ec)) break; } if (unlikely(i == ACPI_EC_CLEAR_MAX)) @@ -518,7 +542,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) #ifdef CONFIG_PM_SLEEP static void __acpi_ec_flush_work(void) { - drain_workqueue(ec_wq); /* flush ec->work */ + flush_workqueue(ec_wq); /* flush ec->work */ flush_workqueue(ec_query_wq); /* flush queries */ } @@ -549,8 +573,8 @@ void acpi_ec_flush_work(void) static bool acpi_ec_guard_event(struct acpi_ec *ec) { - bool guarded = true; unsigned long flags; + bool guarded; spin_lock_irqsave(&ec->lock, flags); /* @@ -559,19 +583,15 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec) * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an * acceptable period. * - * The guarding period begins when EC_FLAGS_QUERY_PENDING is - * flagged, which means SCI_EVT check has just been performed. - * But if the current transaction is ACPI_EC_COMMAND_QUERY, the - * guarding should have already been performed (via - * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the - * ACPI_EC_COMMAND_QUERY transaction can be transitioned into - * ACPI_EC_COMMAND_POLL state immediately. + * The guarding period is applicable if the event state is not + * EC_EVENT_READY, but otherwise if the current transaction is of the + * ACPI_EC_COMMAND_QUERY type, the guarding should have elapsed already + * and it should not be applied to let the transaction transition into + * the ACPI_EC_COMMAND_POLL state immediately. 
*/ - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || - ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY || - !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) || - (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) - guarded = false; + guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && + ec->event_state != EC_EVENT_READY && + (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); spin_unlock_irqrestore(&ec->lock, flags); return guarded; } @@ -603,16 +623,26 @@ static int ec_transaction_completed(struct acpi_ec *ec) static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag) { ec->curr->flags |= flag; - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS && - flag == ACPI_EC_COMMAND_POLL) - acpi_ec_complete_query(ec); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY && - flag == ACPI_EC_COMMAND_COMPLETE) - acpi_ec_complete_query(ec); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && - flag == ACPI_EC_COMMAND_COMPLETE) - set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); + + if (ec->curr->command != ACPI_EC_COMMAND_QUERY) + return; + + switch (ec_event_clearing) { + case ACPI_EC_EVT_TIMING_STATUS: + if (flag == ACPI_EC_COMMAND_POLL) + acpi_ec_close_event(ec); + + return; + + case ACPI_EC_EVT_TIMING_QUERY: + if (flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_close_event(ec); + + return; + + case ACPI_EC_EVT_TIMING_EVENT: + if (flag == ACPI_EC_COMMAND_COMPLETE) + acpi_ec_complete_event(ec); } } @@ -634,21 +664,6 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt) ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id()); - /* - * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 - * changes to always trigger a GPE interrupt. - * - * GPE STS is a W1C register, which means: - * - * 1. Software can clear it without worrying about clearing the other - * GPEs' STS bits when the hardware sets them in parallel. - * - * 2. As long as software can ensure only clearing it when it is set, - * hardware won't set it in parallel. 
- */ - if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) - acpi_clear_gpe(NULL, ec->gpe); - status = acpi_ec_read_status(ec); /* @@ -657,11 +672,9 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt) */ if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT && - (!ec->nr_pending_queries || - test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) { - clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags); - acpi_ec_complete_query(ec); - } + ec->event_state == EC_EVENT_COMPLETE) + acpi_ec_close_event(ec); + if (!t) goto out; } @@ -696,7 +709,7 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt) out: if (status & ACPI_EC_FLAG_SCI) - acpi_ec_submit_query(ec); + acpi_ec_submit_event(ec); if (wakeup && interrupt) wake_up(&ec->wait); @@ -772,6 +785,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, unsigned long tmp; int ret = 0; + if (t->rdata) + memset(t->rdata, 0, t->rlen); + /* start transaction */ spin_lock_irqsave(&ec->lock, tmp); /* Enable GPE for command processing (IBF=0/OBF=1) */ @@ -808,8 +824,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) return -EINVAL; - if (t->rdata) - memset(t->rdata, 0, t->rlen); mutex_lock(&ec->mutex); if (ec->global_lock) { @@ -836,7 +850,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec) .wdata = NULL, .rdata = &d, .wlen = 0, .rlen = 1}; - return acpi_ec_transaction(ec, &t); + return acpi_ec_transaction_unlocked(ec, &t); } static int acpi_ec_burst_disable(struct acpi_ec *ec) @@ -846,7 +860,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) .wlen = 0, .rlen = 0}; return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? - acpi_ec_transaction(ec, &t) : 0; + acpi_ec_transaction_unlocked(ec, &t) : 0; } static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) @@ -862,6 +876,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) return result; } +static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) +{ + int result; + u8 d; + struct transaction t = {.command = ACPI_EC_COMMAND_READ, + .wdata = &address, .rdata = &d, + .wlen = 1, .rlen = 1}; + + result = acpi_ec_transaction_unlocked(ec, &t); + *data = d; + return result; +} + static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) { u8 wdata[2] = { address, data }; @@ -872,6 +899,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) return acpi_ec_transaction(ec, &t); } +static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) +{ + u8 wdata[2] = { address, data }; + struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, + .wdata = wdata, .rdata = NULL, + .wlen = 2, .rlen = 0}; + + return acpi_ec_transaction_unlocked(ec, &t); +} + int ec_read(u8 addr, u8 *val) { int err; @@ -892,14 +929,10 @@ EXPORT_SYMBOL(ec_read); int ec_write(u8 addr, u8 val) { - int err; - if (!first_ec) return -ENODEV; - err = acpi_ec_write(first_ec, addr, val); - - return err; + return acpi_ec_write(first_ec, addr, val); } EXPORT_SYMBOL(ec_write); @@ -1061,9 +1094,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, acpi_handle handle, acpi_ec_query_func func, void *data) { - struct acpi_ec_query_handler *handler = - kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL); + struct acpi_ec_query_handler *handler; + + if (!handle && !func) + return -EINVAL; + handler = kzalloc(sizeof(*handler), GFP_KERNEL); if (!handler) return -ENOMEM; @@ -1075,6 
+1111,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, kref_init(&handler->kref); list_add(&handler->node, &ec->list); mutex_unlock(&ec->mutex); + return 0; } EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); @@ -1087,9 +1124,16 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { - if (remove_all || query_bit == handler->query_bit) { + /* + * When remove_all is false, only remove custom query handlers + * which have handler->func set. This is done to preserve query + * handlers discovered thru ACPI, as they should continue handling + * EC queries. + */ + if (remove_all || (handler->func && handler->query_bit == query_bit)) { list_del_init(&handler->node); list_add(&handler->node, &free_list); + } } mutex_unlock(&ec->mutex); @@ -1100,10 +1144,34 @@ static void acpi_ec_remove_query_handlers(struct acpi_ec *ec, void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { acpi_ec_remove_query_handlers(ec, false, query_bit); + flush_workqueue(ec_query_wq); } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) +static void acpi_ec_event_processor(struct work_struct *work) +{ + struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); + struct acpi_ec_query_handler *handler = q->handler; + struct acpi_ec *ec = q->ec; + + ec_dbg_evt("Query(0x%02x) started", handler->query_bit); + + if (handler->func) + handler->func(handler->data); + else if (handler->handle) + acpi_evaluate_object(handler->handle, NULL, NULL, NULL); + + ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); + + spin_lock_irq(&ec->lock); + ec->queries_in_progress--; + spin_unlock_irq(&ec->lock); + + acpi_ec_put_query_handler(handler); + kfree(q); +} + +static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) { struct acpi_ec_query *q; struct transaction *t; @@ -1111,44 +1179,23 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); if (!q) return NULL; + INIT_WORK(&q->work, acpi_ec_event_processor); t = &q->transaction; t->command = ACPI_EC_COMMAND_QUERY; t->rdata = pval; t->rlen = 1; + q->ec = ec; return q; } -static void acpi_ec_delete_query(struct acpi_ec_query *q) -{ - if (q) { - if (q->handler) - acpi_ec_put_query_handler(q->handler); - kfree(q); - } -} - -static void acpi_ec_event_processor(struct work_struct *work) -{ - struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); - struct acpi_ec_query_handler *handler = q->handler; - - ec_dbg_evt("Query(0x%02x) started", handler->query_bit); - if (handler->func) - handler->func(handler->data); - else if (handler->handle) - acpi_evaluate_object(handler->handle, NULL, NULL, NULL); - ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); - acpi_ec_delete_query(q); -} - -static int acpi_ec_query(struct acpi_ec *ec, u8 *data) +static int acpi_ec_submit_query(struct acpi_ec *ec) { + struct acpi_ec_query *q; u8 value = 0; int result; - struct acpi_ec_query *q; - q = acpi_ec_create_query(&value); + q = acpi_ec_create_query(ec, &value); if (!q) return -ENOMEM; @@ -1158,11 +1205,14 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) * bit to be cleared (and thus clearing the interrupt source). 
*/ result = acpi_ec_transaction(ec, &q->transaction); - if (!value) - result = -ENODATA; if (result) goto err_exit; + if (!value) { + result = -ENODATA; + goto err_exit; + } + q->handler = acpi_ec_get_query_handler_by_value(ec, value); if (!q->handler) { result = -ENODATA; @@ -1170,76 +1220,97 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) } /* - * It is reported that _Qxx are evaluated in a parallel way on - * Windows: + * It is reported that _Qxx are evaluated in a parallel way on Windows: * https://bugzilla.kernel.org/show_bug.cgi?id=94411 * - * Put this log entry before schedule_work() in order to make - * it appearing before any other log entries occurred during the - * work queue execution. + * Put this log entry before queue_work() to make it appear in the log + * before any other messages emitted during workqueue handling. */ ec_dbg_evt("Query(0x%02x) scheduled", value); - if (!queue_work(ec_query_wq, &q->work)) { - ec_dbg_evt("Query(0x%02x) overlapped", value); - result = -EBUSY; - } -err_exit: - if (result) - acpi_ec_delete_query(q); - if (data) - *data = value; - return result; -} + spin_lock_irq(&ec->lock); -static void acpi_ec_check_event(struct acpi_ec *ec) -{ - unsigned long flags; + ec->queries_in_progress++; + queue_work(ec_query_wq, &q->work); - if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { - if (ec_guard(ec)) { - spin_lock_irqsave(&ec->lock, flags); - /* - * Take care of the SCI_EVT unless no one else is - * taking care of it. - */ - if (!ec->curr) - advance_transaction(ec, false); - spin_unlock_irqrestore(&ec->lock, flags); - } - } + spin_unlock_irq(&ec->lock); + + return 0; + +err_exit: + kfree(q); + + return result; } static void acpi_ec_event_handler(struct work_struct *work) { - unsigned long flags; struct acpi_ec *ec = container_of(work, struct acpi_ec, work); ec_dbg_evt("Event started"); - spin_lock_irqsave(&ec->lock, flags); - while (ec->nr_pending_queries) { - spin_unlock_irqrestore(&ec->lock, flags); - (void)acpi_ec_query(ec, NULL); - spin_lock_irqsave(&ec->lock, flags); - ec->nr_pending_queries--; - /* - * Before exit, make sure that this work item can be - * scheduled again. There might be QR_EC failures, leaving - * EC_FLAGS_QUERY_PENDING uncleared and preventing this work - * item from being scheduled again. - */ - if (!ec->nr_pending_queries) { - if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS || - ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY) - acpi_ec_complete_query(ec); - } + spin_lock_irq(&ec->lock); + + while (ec->events_to_process) { + spin_unlock_irq(&ec->lock); + + acpi_ec_submit_query(ec); + + spin_lock_irq(&ec->lock); + + ec->events_to_process--; } - spin_unlock_irqrestore(&ec->lock, flags); - ec_dbg_evt("Event stopped"); + /* + * Before exit, make sure that the it will be possible to queue up the + * event handling work again regardless of whether or not the query + * queued up above is processed successfully. + */ + if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) { + bool guard_timeout; + + acpi_ec_complete_event(ec); + + ec_dbg_evt("Event stopped"); + + spin_unlock_irq(&ec->lock); + + guard_timeout = !!ec_guard(ec); + + spin_lock_irq(&ec->lock); - acpi_ec_check_event(ec); + /* Take care of SCI_EVT unless someone else is doing that. 
*/ + if (guard_timeout && !ec->curr) + advance_transaction(ec, false); + } else { + acpi_ec_close_event(ec); + + ec_dbg_evt("Event stopped"); + } + + ec->events_in_progress--; + + spin_unlock_irq(&ec->lock); +} + +static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt) +{ + /* + * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 + * changes to always trigger a GPE interrupt. + * + * GPE STS is a W1C register, which means: + * + * 1. Software can clear it without worrying about clearing the other + * GPEs' STS bits when the hardware sets them in parallel. + * + * 2. As long as software can ensure only clearing it when it is set, + * hardware won't set it in parallel. + */ + if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) + acpi_clear_gpe(NULL, ec->gpe); + + advance_transaction(ec, true); } static void acpi_ec_handle_interrupt(struct acpi_ec *ec) @@ -1247,7 +1318,9 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec) unsigned long flags; spin_lock_irqsave(&ec->lock, flags); - advance_transaction(ec, true); + + clear_gpe_and_advance_transaction(ec, true); + spin_unlock_irqrestore(&ec->lock, flags); } @@ -1276,6 +1349,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, struct acpi_ec *ec = handler_context; int result = 0, i, bytes = bits / 8; u8 *value = (u8 *)value64; + u32 glk; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -1283,17 +1357,38 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; + mutex_lock(&ec->mutex); + + if (ec->global_lock) { + acpi_status status; + + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); + if (ACPI_FAILURE(status)) { + result = -ENODEV; + goto unlock; + } + } + if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); - for (i = 0; i < bytes; ++i, ++address, ++value) + for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? - acpi_ec_read(ec, address, value) : - acpi_ec_write(ec, address, *value); + acpi_ec_read_unlocked(ec, address, value) : + acpi_ec_write_unlocked(ec, address, *value); + if (result < 0) + break; + } if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); + if (ec->global_lock) + acpi_release_global_lock(glk); + +unlock: + mutex_unlock(&ec->mutex); + switch (result) { case -EINVAL: return AE_BAD_PARAMETER; @@ -1301,8 +1396,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, return AE_NOT_FOUND; case -ETIME: return AE_TIME; - default: + case 0: return AE_OK; + default: + return AE_ERROR; } } @@ -1375,24 +1472,16 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval) if (ec->data_addr == 0 || ec->command_addr == 0) return AE_OK; - if (boot_ec && boot_ec_is_ecdt && EC_FLAGS_IGNORE_DSDT_GPE) { - /* - * Always inherit the GPE number setting from the ECDT - * EC. - */ - ec->gpe = boot_ec->gpe; - } else { - /* Get GPE bit assignment (EC events). */ - /* TODO: Add support for _GPE returning a package */ - status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); - if (ACPI_SUCCESS(status)) - ec->gpe = tmp; + /* Get GPE bit assignment (EC events). */ + /* TODO: Add support for _GPE returning a package */ + status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); + if (ACPI_SUCCESS(status)) + ec->gpe = tmp; + /* + * Errors are non-fatal, allowing for ACPI Reduced Hardware + * platforms which use GpioInt instead of GPE. 
+ */ - /* - * Errors are non-fatal, allowing for ACPI Reduced Hardware - * platforms which use GpioInt instead of GPE. - */ - } /* Use the global lock for all EC transactions? */ tmp = 0; acpi_evaluate_integer(handle, "_GLK", NULL, &tmp); @@ -1419,14 +1508,15 @@ static bool install_gpe_event_handler(struct acpi_ec *ec) static bool install_gpio_irq_event_handler(struct acpi_ec *ec) { - return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED, - "ACPI EC", ec) >= 0; + return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler, + IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0; } /** * ec_install_handlers - Install service callbacks and register query methods. * @ec: Target EC. * @device: ACPI device object corresponding to @ec. + * @call_reg: If _REG should be called to notify OpRegion availability * * Install a handler for the EC address space type unless it has been installed * already. If @device is not NULL, also look for EC query methods in the @@ -1439,18 +1529,21 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec) * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred, * or 0 (success) otherwise. */ -static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) +static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, + bool call_reg) { acpi_status status; acpi_ec_start(ec, false); if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { + acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; + acpi_ec_enter_noirq(ec); - status = acpi_install_address_space_handler(ec->handle, - ACPI_ADR_SPACE_EC, - &acpi_ec_space_handler, - NULL, ec); + status = acpi_install_address_space_handler_no_reg(scope_handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler, + NULL, ec); if (ACPI_FAILURE(status)) { acpi_ec_stop(ec, false); return -ENODEV; @@ -1458,6 +1551,11 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } + if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { + acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); + } + if (!device) return 0; @@ -1506,9 +1604,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device) static void ec_remove_handlers(struct acpi_ec *ec) { + acpi_handle scope_handle = ec == first_ec ? 
ACPI_ROOT_OBJECT : ec->handle; + if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { - if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, - ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) + if (ACPI_FAILURE(acpi_remove_address_space_handler( + scope_handle, + ACPI_ADR_SPACE_EC, + &acpi_ec_space_handler))) pr_err("failed to remove space handler\n"); clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); } @@ -1543,18 +1645,22 @@ static void ec_remove_handlers(struct acpi_ec *ec) } } -static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device) +static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg) { int ret; - ret = ec_install_handlers(ec, device); - if (ret) - return ret; - /* First EC capable of handling transactions */ if (!first_ec) first_ec = ec; + ret = ec_install_handlers(ec, device, call_reg); + if (ret) { + if (ec == first_ec) + first_ec = NULL; + + return ret; + } + pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, ec->data_addr); @@ -1573,8 +1679,8 @@ static int acpi_ec_add(struct acpi_device *device) struct acpi_ec *ec; int ret; - strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_EC_CLASS); + strscpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); + strscpy(acpi_device_class(device), ACPI_EC_CLASS); if (boot_ec && (boot_ec->handle == device->handle || !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) { @@ -1594,22 +1700,25 @@ static int acpi_ec_add(struct acpi_device *device) } if (boot_ec && ec->command_addr == boot_ec->command_addr && - ec->data_addr == boot_ec->data_addr && - !EC_FLAGS_TRUST_DSDT_GPE) { + ec->data_addr == boot_ec->data_addr) { /* - * Trust PNP0C09 namespace location rather than - * ECDT ID. But trust ECDT GPE rather than _GPE - * because of ASUS quirks, so do not change - * boot_ec->gpe to ec->gpe. + * Trust PNP0C09 namespace location rather than ECDT ID. + * But trust ECDT GPE rather than _GPE because of ASUS + * quirks. So do not change boot_ec->gpe to ec->gpe, + * except when the TRUST_DSDT_GPE quirk is set. */ boot_ec->handle = ec->handle; + + if (EC_FLAGS_TRUST_DSDT_GPE) + boot_ec->gpe = ec->gpe; + acpi_handle_debug(ec->handle, "duplicated.\n"); acpi_ec_free(ec); ec = boot_ec; } } - ret = acpi_ec_setup(ec, device); + ret = acpi_ec_setup(ec, device, true); if (ret) goto err; @@ -1641,12 +1750,12 @@ err: return ret; } -static int acpi_ec_remove(struct acpi_device *device) +static void acpi_ec_remove(struct acpi_device *device) { struct acpi_ec *ec; if (!device) - return -EINVAL; + return; ec = acpi_driver_data(device); release_region(ec->data_addr, 1); @@ -1656,7 +1765,12 @@ static int acpi_ec_remove(struct acpi_device *device) ec_remove_handlers(ec); acpi_ec_free(ec); } - return 0; +} + +void acpi_ec_register_opregions(struct acpi_device *adev) +{ + if (first_ec && first_ec->handle != adev->handle) + acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC); } static acpi_status @@ -1729,7 +1843,7 @@ void __init acpi_ec_dsdt_probe(void) * At this point, the GPE is not fully initialized, so do not to * handle the events. */ - ret = acpi_ec_setup(ec, NULL); + ret = acpi_ec_setup(ec, NULL, true); if (ret) { acpi_ec_free(ec); return; @@ -1830,68 +1944,71 @@ static int ec_honor_dsdt_gpe(const struct dmi_system_id *id) return 0; } -/* - * Some DSDTs contain wrong GPE setting. 
- * Asus FX502VD/VE, GL702VMK, X550VXK, X580VD - * https://bugzilla.kernel.org/show_bug.cgi?id=195651 - */ -static int ec_honor_ecdt_gpe(const struct dmi_system_id *id) -{ - pr_debug("Detected system needing ignore DSDT GPE setting.\n"); - EC_FLAGS_IGNORE_DSDT_GPE = 1; - return 0; -} - static const struct dmi_system_id ec_dmi_table[] __initconst = { { - ec_correct_ecdt, "MSI MS-171F", { - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), - DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS FX502VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VD"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS FX502VE", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "FX502VE"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUS GL702VMK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL}, - { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL}, + /* + * MSI MS-171F + * https://bugzilla.kernel.org/show_bug.cgi?id=12461 + */ + .callback = ec_correct_ecdt, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"), + DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"), + }, + }, { - ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-cx0xxx + * https://bugzilla.kernel.org/show_bug.cgi?id=209989 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS X550VXK", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-cx0041ur + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"), + }, + }, { - ec_honor_ecdt_gpe, "ASUS X580VD", { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, + /* + * HP Pavilion Gaming Laptop 15-dk1xxx + * https://github.com/systemd/systemd/issues/28942 + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"), + }, + }, { - /* https://bugzilla.kernel.org/show_bug.cgi?id=209989 */ - ec_honor_dsdt_gpe, "HP Pavilion Gaming Laptop 15-cx0xxx", { - DMI_MATCH(DMI_SYS_VENDOR, "HP"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),}, NULL}, + /* + * HP 250 G7 Notebook PC + */ + .callback = ec_honor_dsdt_gpe, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 250 G7 Notebook PC"), + }, + }, { - ec_clear_on_resume, "Samsung hardware", { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, - {}, + /* + * Samsung hardware + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + */ + .callback = ec_clear_on_resume, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., 
LTD."), + }, + }, + {} }; void __init acpi_ec_ecdt_probe(void) @@ -1916,6 +2033,25 @@ void __init acpi_ec_ecdt_probe(void) goto out; } + if (!strlen(ecdt_ptr->id)) { + /* + * The ECDT table on some MSI notebooks contains invalid data, together + * with an empty ID string (""). + * + * Section 5.2.15 of the ACPI specification requires the ID string to be + * a "fully qualified reference to the (...) embedded controller device", + * so this string always has to start with a backslash. + * + * However some ThinkBook machines have a ECDT table with a valid EC + * description but an invalid ID string ("_SB.PC00.LPCB.EC0"). + * + * Because of this we only check if the ID string is empty in order to + * avoid the obvious cases. + */ + pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n"); + goto out; + } + ec = acpi_ec_alloc(); if (!ec) goto out; @@ -1941,7 +2077,7 @@ void __init acpi_ec_ecdt_probe(void) * At this point, the namespace is not initialized, so do not find * the namespace objects, or handle the events. */ - ret = acpi_ec_setup(ec, NULL); + ret = acpi_ec_setup(ec, NULL, false); if (ret) { acpi_ec_free(ec); goto out; @@ -2019,9 +2155,14 @@ void acpi_ec_set_gpe_wake_mask(u8 action) acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); } +static bool acpi_ec_work_in_progress(struct acpi_ec *ec) +{ + return ec->events_in_progress + ec->queries_in_progress > 0; +} + bool acpi_ec_dispatch_gpe(void) { - u32 ret; + bool work_in_progress = false; if (!first_ec) return acpi_any_gpe_status_set(U32_MAX); @@ -2034,15 +2175,47 @@ bool acpi_ec_dispatch_gpe(void) return true; /* + * Cancel the SCI wakeup and process all pending events in case there + * are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the SCI will + * retrigger after the rearming in acpi_s2idle_wake(), so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); + + /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. */ - ret = acpi_dispatch_gpe(NULL, first_ec->gpe); - if (ret == ACPI_INTERRUPT_HANDLED) - pm_pr_dbg("ACPI EC GPE dispatched\n"); + spin_lock_irq(&first_ec->lock); + + if (acpi_ec_gpe_status_set(first_ec)) { + pm_pr_dbg("ACPI EC GPE status set\n"); + + clear_gpe_and_advance_transaction(first_ec, false); + work_in_progress = acpi_ec_work_in_progress(first_ec); + } + + spin_unlock_irq(&first_ec->lock); + + if (!work_in_progress) + return false; - /* Flush the event and query workqueues. */ - acpi_ec_flush_work(); + pm_pr_dbg("ACPI EC GPE dispatched\n"); + + /* Drain EC work. 
*/ + do { + acpi_ec_flush_work(); + + pm_pr_dbg("ACPI EC work flushed\n"); + + spin_lock_irq(&first_ec->lock); + + work_in_progress = acpi_ec_work_in_progress(first_ec); + + spin_unlock_irq(&first_ec->lock); + } while (work_in_progress && !pm_wakeup_pending()); return false; } @@ -2121,7 +2294,8 @@ static int acpi_ec_init_workqueues(void) ec_wq = alloc_ordered_workqueue("kec", 0); if (!ec_query_wq) - ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries); + ec_query_wq = alloc_workqueue("kec_query", WQ_PERCPU, + ec_max_queries); if (!ec_wq || !ec_query_wq) { acpi_ec_destroy_workqueues(); @@ -2132,24 +2306,55 @@ static int acpi_ec_init_workqueues(void) static const struct dmi_system_id acpi_ec_no_wakeup[] = { { - .ident = "Thinkpad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"), }, }, { - .ident = "ThinkPad X1 Carbon 6th", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Carbon 6th"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"), }, }, { - .ident = "ThinkPad X1 Yoga 3rd", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Yoga 3rd"), + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), + }, + }, + /* + * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC + * https://gitlab.freedesktop.org/drm/amd/-/issues/3929 + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, + { + // TUXEDO InfinityBook Pro AMD Gen9 + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"), }, }, { }, |

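On the suspend-to-idle side, acpi_ec_dispatch_gpe() now cancels the SCI wakeup, handles the GPE in-band, and then repeatedly flushes EC work until both events_in_progress and queries_in_progress drop to zero or a wakeup becomes pending. A rough standalone model of that drain loop follows; the struct, the single-unit flush_work(), and wakeup_pending() are illustrative assumptions standing in for acpi_ec_flush_work() and pm_wakeup_pending().

#include <stdbool.h>
#include <stdio.h>

struct ec_state {
	unsigned int events_in_progress;
	unsigned int queries_in_progress;
};

static bool work_in_progress(const struct ec_state *ec)
{
	/* Mirrors acpi_ec_work_in_progress() from the patch. */
	return ec->events_in_progress + ec->queries_in_progress > 0;
}

static void flush_work(struct ec_state *ec)
{
	/* Stand-in for acpi_ec_flush_work(): pretend one unit completes. */
	if (ec->events_in_progress)
		ec->events_in_progress--;
	else if (ec->queries_in_progress)
		ec->queries_in_progress--;
}

static bool wakeup_pending(void)
{
	return false;	/* Stand-in for pm_wakeup_pending(). */
}

int main(void)
{
	struct ec_state ec = { .events_in_progress = 1, .queries_in_progress = 2 };

	do {
		flush_work(&ec);
		printf("flushed, remaining: %u events, %u queries\n",
		       ec.events_in_progress, ec.queries_in_progress);
	} while (work_in_progress(&ec) && !wakeup_pending());

	return 0;
}

The pm_wakeup_pending() check is what keeps the real loop from spinning indefinitely if new EC work keeps arriving while the system is trying to enter s2idle.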