Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/acpi_processor.c |   3
-rw-r--r--  drivers/acpi/apei/ghes.c      |  63
-rw-r--r--  drivers/acpi/bus.c            |   2
-rw-r--r--  drivers/acpi/cppc_acpi.c      |  31
-rw-r--r--  drivers/acpi/ec.c             | 112
-rw-r--r--  drivers/acpi/internal.h       |   1
-rw-r--r--  drivers/acpi/scan.c           | 172
-rw-r--r--  drivers/acpi/x86/s2idle.c     |  37
8 files changed, 217 insertions(+), 204 deletions(-)
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4fe2ef54088c..99c28b7307c3 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -381,6 +381,9 @@ static int acpi_processor_add(struct acpi_device *device,
struct device *dev;
int result = 0;
+ if (!acpi_device_is_enabled(device))
+ return -ENODEV;
+
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
return -ENOMEM;
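
The acpi_processor_add() change above makes processor enumeration bail out early when the device object is not enabled; the acpi_device_is_enabled() helper it relies on is added in the scan.c part of this diff (present && enabled). As a rough userspace sketch of what that check boils down to (illustrative only; the _STA bit values follow the ACPI spec, not kernel headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STA_DEVICE_PRESENT 0x01  /* _STA bit 0: device is present */
    #define STA_DEVICE_ENABLED 0x02  /* _STA bit 1: device is enabled, decoding its resources */

    /* Mirrors acpi_device_is_enabled(): both bits must be set. */
    static bool sketch_device_is_enabled(uint32_t sta)
    {
            return (sta & STA_DEVICE_PRESENT) && (sta & STA_DEVICE_ENABLED);
    }

    int main(void)
    {
            /* A processor object reporting present but not enabled is now skipped. */
            printf("present-only: %d, present+enabled: %d\n",
                   sketch_device_is_enabled(0x01), sketch_device_is_enabled(0x03));
            return 0;
    }
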
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index fe825a432c5b..ab2a82cb1b0b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -674,52 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
- struct cxl_cper_event_rec *rec)
-{
- if (rec->hdr.length <= sizeof(rec->hdr) ||
- rec->hdr.length > sizeof(*rec)) {
- pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
- rec->hdr.length);
- return;
- }
-
- if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
- pr_err(FW_WARN "CXL CPER invalid event\n");
- return;
- }
-
- guard(rwsem_read)(&cxl_cper_rw_sem);
- if (cper_callback)
- cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (cper_callback)
- return -EINVAL;
- cper_callback = callback;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (callback != cper_callback)
- return -EINVAL;
- cper_callback = NULL;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -754,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
- } else if (guid_equal(sec_type,
- &CPER_SEC_CXL_MEM_MODULE_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 569bd15f211b..d9fa730416f1 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1097,7 +1097,7 @@ static void acpi_device_remove(struct device *dev)
put_device(dev);
}
-struct bus_type acpi_bus_type = {
+const struct bus_type acpi_bus_type = {
.name = "acpi",
.match = acpi_bus_match,
.probe = acpi_device_probe,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d155a86a8614..b954ce3638a9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -166,6 +166,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+/* Check for valid access_width, otherwise, fallback to using bit_width */
+#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
+
+/* Shift and apply the mask for CPC reads/writes */
+#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset & \
+ GENMASK(((reg)->bit_width), 0)))
+
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -780,6 +787,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
+ size_t access_width;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
@@ -787,7 +795,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- addr = ioremap(gas_t->address, gas_t->bit_width/8);
+ access_width = GET_BIT_WIDTH(gas_t) / 8;
+ addr = ioremap(gas_t->address, access_width);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
@@ -983,6 +992,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
+ int size;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -994,7 +1004,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
+ u32 width = GET_BIT_WIDTH(reg);
u32 val_u32;
acpi_status status;
@@ -1018,7 +1028,9 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
return acpi_os_read_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
- switch (reg->bit_width) {
+ size = GET_BIT_WIDTH(reg);
+
+ switch (size) {
case 8:
*val = readb_relaxed(vaddr);
break;
@@ -1037,18 +1049,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
return -EFAULT;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ *val = MASK_VAL(reg, *val);
+
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
+ int size;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
+ u32 width = GET_BIT_WIDTH(reg);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
@@ -1070,7 +1086,12 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
return acpi_os_write_memory((acpi_physical_address)reg->address,
val, reg->bit_width);
- switch (reg->bit_width) {
+ size = GET_BIT_WIDTH(reg);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ val = MASK_VAL(reg, val);
+
+ switch (size) {
case 8:
writeb_relaxed(val, vaddr);
break;
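
The new GET_BIT_WIDTH() helper prefers the access_width field of the Generic Address Structure, which the ACPI spec encodes as 1 = byte, 2 = word, 3 = dword, 4 = qword, and only falls back to bit_width when access_width is 0 (undefined). A standalone sketch of that mapping (illustrative, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Same arithmetic as GET_BIT_WIDTH(): 8 << (access_width - 1), else bit_width. */
    static unsigned int sketch_bit_width(uint8_t access_width, uint8_t bit_width)
    {
            return access_width ? (8u << (access_width - 1)) : bit_width;
    }

    int main(void)
    {
            for (uint8_t aw = 0; aw <= 4; aw++)
                    printf("access_width=%u -> %u bits\n", aw, sketch_bit_width(aw, 32));
            return 0;
    }

With access_width = 0 the sketch falls back to the supplied bit_width (32 here), matching the comment above the macro; values 1 through 4 yield 8, 16, 32 and 64 bits.
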
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index dbdee2924594..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
+ unsigned long flags;
bool guarded;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
+ unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
+ unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
+ unsigned long flags;
bool flushed;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
return 0;
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
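
Throughout ec.c this patch moves back to the interrupt-disabling lock variants for ec->lock, which is also taken from the EC GPE/interrupt path (see acpi_ec_handle_interrupt() above): spin_lock_irqsave() where the caller's interrupt state is unknown, spin_lock_irq() where interrupts are known to be enabled. A minimal kernel-style sketch of the two variants (assumes a kernel build; names are illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* Caller may already run with interrupts disabled: save and restore them. */
    static void touch_shared_state_any_context(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            /* ... access state shared with an interrupt handler ... */
            spin_unlock_irqrestore(&example_lock, flags);
    }

    /* Caller known to run with interrupts enabled (e.g. workqueue context). */
    static void touch_shared_state_process_context(void)
    {
            spin_lock_irq(&example_lock);
            /* ... */
            spin_unlock_irq(&example_lock);
    }
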
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 6588525c45ef..895aa2c0ad58 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -121,6 +121,7 @@ int acpi_device_setup_files(struct acpi_device *dev);
void acpi_device_remove_files(struct acpi_device *dev);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
+bool acpi_device_is_enabled(const struct acpi_device *adev);
bool acpi_device_is_present(const struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e6ed1ba91e5c..3b722e4c0f06 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -244,6 +244,53 @@ static int acpi_scan_try_to_offline(struct acpi_device *device)
return 0;
}
+static int acpi_scan_check_and_detach(struct acpi_device *adev, void *check)
+{
+ struct acpi_scan_handler *handler = adev->handler;
+
+ acpi_dev_for_each_child_reverse(adev, acpi_scan_check_and_detach, check);
+
+ if (check) {
+ acpi_bus_get_status(adev);
+ /*
+ * Skip devices that are still there and take the enabled
+ * flag into account.
+ */
+ if (acpi_device_is_enabled(adev))
+ return 0;
+
+ /* Skip devices that have not been enumerated. */
+ if (!acpi_device_enumerated(adev)) {
+ dev_dbg(&adev->dev, "Still not enumerated\n");
+ return 0;
+ }
+ }
+
+ adev->flags.match_driver = false;
+ if (handler) {
+ if (handler->detach)
+ handler->detach(adev);
+
+ adev->handler = NULL;
+ } else {
+ device_release_driver(&adev->dev);
+ }
+ /*
+ * Most likely, the device is going away, so put it into D3cold before
+ * that.
+ */
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+ adev->flags.initialized = false;
+ acpi_device_clear_enumerated(adev);
+
+ return 0;
+}
+
+static void acpi_scan_check_subtree(struct acpi_device *adev)
+{
+ acpi_scan_check_and_detach(adev, (void *)true);
+}
+
static int acpi_scan_hot_remove(struct acpi_device *device)
{
acpi_handle handle = device->handle;
@@ -289,75 +336,62 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
return 0;
}
-static int acpi_scan_device_not_enumerated(struct acpi_device *adev)
+static int acpi_scan_rescan_bus(struct acpi_device *adev)
{
- if (!acpi_device_enumerated(adev)) {
- dev_warn(&adev->dev, "Still not enumerated\n");
- return -EALREADY;
- }
- acpi_bus_trim(adev);
- return 0;
+ struct acpi_scan_handler *handler = adev->handler;
+ int ret;
+
+ if (handler && handler->hotplug.scan_dependent)
+ ret = handler->hotplug.scan_dependent(adev);
+ else
+ ret = acpi_bus_scan(adev->handle);
+
+ if (ret)
+ dev_info(&adev->dev, "Namespace scan failure\n");
+
+ return ret;
}
static int acpi_scan_device_check(struct acpi_device *adev)
{
- int error;
+ struct acpi_device *parent;
- acpi_bus_get_status(adev);
- if (acpi_device_is_present(adev)) {
- /*
- * This function is only called for device objects for which
- * matching scan handlers exist. The only situation in which
- * the scan handler is not attached to this device object yet
- * is when the device has just appeared (either it wasn't
- * present at all before or it was removed and then added
- * again).
- */
- if (adev->handler) {
- dev_warn(&adev->dev, "Already enumerated\n");
- return -EALREADY;
- }
- error = acpi_bus_scan(adev->handle);
- if (error) {
- dev_warn(&adev->dev, "Namespace scan failure\n");
- return error;
- }
- if (!adev->handler) {
- dev_warn(&adev->dev, "Enumeration failure\n");
- error = -ENODEV;
- }
- } else {
- error = acpi_scan_device_not_enumerated(adev);
- }
- return error;
-}
+ acpi_scan_check_subtree(adev);
-static int acpi_scan_bus_check(struct acpi_device *adev, void *not_used)
-{
- struct acpi_scan_handler *handler = adev->handler;
- int error;
+ if (!acpi_device_is_present(adev))
+ return 0;
- acpi_bus_get_status(adev);
- if (!acpi_device_is_present(adev)) {
- acpi_scan_device_not_enumerated(adev);
+ /*
+ * This function is only called for device objects for which matching
+ * scan handlers exist. The only situation in which the scan handler
+ * is not attached to this device object yet is when the device has
+ * just appeared (either it wasn't present at all before or it was
+ * removed and then added again).
+ */
+ if (adev->handler) {
+ dev_dbg(&adev->dev, "Already enumerated\n");
return 0;
}
- if (handler && handler->hotplug.scan_dependent)
- return handler->hotplug.scan_dependent(adev);
- error = acpi_bus_scan(adev->handle);
- if (error) {
- dev_warn(&adev->dev, "Namespace scan failure\n");
- return error;
- }
- return acpi_dev_for_each_child(adev, acpi_scan_bus_check, NULL);
+ parent = acpi_dev_parent(adev);
+ if (!parent)
+ parent = adev;
+
+ return acpi_scan_rescan_bus(parent);
+}
+
+static int acpi_scan_bus_check(struct acpi_device *adev)
+{
+ acpi_scan_check_subtree(adev);
+
+ return acpi_scan_rescan_bus(adev);
}
static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
{
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- return acpi_scan_bus_check(adev, NULL);
+ return acpi_scan_bus_check(adev);
case ACPI_NOTIFY_DEVICE_CHECK:
return acpi_scan_device_check(adev);
case ACPI_NOTIFY_EJECT_REQUEST:
@@ -798,6 +832,7 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */
"INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
"INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
+ "INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
NULL
};
@@ -1922,6 +1957,11 @@ bool acpi_device_is_present(const struct acpi_device *adev)
return adev->status.present || adev->status.functional;
}
+bool acpi_device_is_enabled(const struct acpi_device *adev)
+{
+ return adev->status.present && adev->status.enabled;
+}
+
static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
const char *idstr,
const struct acpi_device_id **matchid)
@@ -2550,32 +2590,6 @@ int acpi_bus_scan(acpi_handle handle)
}
EXPORT_SYMBOL(acpi_bus_scan);
-static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
-{
- struct acpi_scan_handler *handler = adev->handler;
-
- acpi_dev_for_each_child_reverse(adev, acpi_bus_trim_one, NULL);
-
- adev->flags.match_driver = false;
- if (handler) {
- if (handler->detach)
- handler->detach(adev);
-
- adev->handler = NULL;
- } else {
- device_release_driver(&adev->dev);
- }
- /*
- * Most likely, the device is going away, so put it into D3cold before
- * that.
- */
- acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
- adev->flags.initialized = false;
- acpi_device_clear_enumerated(adev);
-
- return 0;
-}
-
/**
* acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
* @adev: Root of the ACPI namespace scope to walk.
@@ -2584,7 +2598,7 @@ static int acpi_bus_trim_one(struct acpi_device *adev, void *not_used)
*/
void acpi_bus_trim(struct acpi_device *adev)
{
- acpi_bus_trim_one(adev, NULL);
+ acpi_scan_check_and_detach(adev, NULL);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
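
The scan.c rework folds the old acpi_bus_trim_one() walk and the hotplug status check into one depth-first helper, acpi_scan_check_and_detach(), whose void *check argument doubles as a flag: acpi_bus_trim() passes NULL to detach unconditionally, while acpi_scan_check_subtree() passes (void *)true so that devices still reported enabled by _STA are left alone and only stale, previously enumerated ones are detached before the rescan. A small standalone sketch of the flag-through-void-pointer pattern (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Children would be visited first here, mirroring the reverse child walk. */
    static int check_and_detach(const char *node, void *check)
    {
            if (check) {
                    printf("%s: re-read status, detach only if no longer enabled\n", node);
                    return 0;
            }
            printf("%s: detach unconditionally\n", node);
            return 0;
    }

    int main(void)
    {
            check_and_detach("CPU0", (void *)true); /* hotplug check path */
            check_and_detach("CPU0", NULL);         /* acpi_bus_trim() path */
            return 0;
    }
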
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index 7d64e655f1b8..cd84af23f7ea 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -488,7 +488,21 @@ static int lps0_device_attach(struct acpi_device *adev,
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
- lps0_dsm_func_mask_microsoft = -EINVAL;
+ if (lps0_dsm_func_mask > 0 && lps0_dsm_func_mask_microsoft > 0) {
+ unsigned int func_mask;
+
+ /*
+ * Avoid evaluating the same _DSM function for two
+ * different UUIDs and prioritize the MSFT one.
+ */
+ func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
+ if (func_mask) {
+ acpi_handle_info(adev->handle,
+ "Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
+ func_mask);
+ lps0_dsm_func_mask &= ~func_mask;
+ }
+ }
}
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
@@ -549,19 +563,22 @@ int acpi_s2idle_prepare_late(void)
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
/* LPS0 entry */
- if (lps0_dsm_func_mask > 0)
- acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
- ACPI_LPS0_ENTRY_AMD :
- ACPI_LPS0_ENTRY,
+ if (lps0_dsm_func_mask > 0 && acpi_s2idle_vendor_amd())
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
+
if (lps0_dsm_func_mask_microsoft > 0) {
- /* modern standby entry */
+ /* Modern Standby entry */
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
}
+ if (lps0_dsm_func_mask > 0 && !acpi_s2idle_vendor_amd())
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
+ lps0_dsm_func_mask, lps0_dsm_guid);
+
list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
if (handler->prepare)
handler->prepare();
@@ -600,14 +617,14 @@ void acpi_s2idle_restore_early(void)
ACPI_LPS0_EXIT_AMD :
ACPI_LPS0_EXIT,
lps0_dsm_func_mask, lps0_dsm_guid);
- if (lps0_dsm_func_mask_microsoft > 0)
+
+ if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
-
- /* Modern standby exit */
- if (lps0_dsm_func_mask_microsoft > 0)
+ /* Modern Standby exit */
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
+ }
/* Screen on */
if (lps0_dsm_func_mask_microsoft > 0)
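
In the s2idle.c hunk above, when both the legacy/AMD LPS0 _DSM and the Microsoft _DSM expose some of the same function indices, the overlap is removed from the legacy mask so each function is evaluated only once, with the Microsoft UUID taking priority; LPS0 entry is then issued before the Microsoft calls on AMD platforms and after them otherwise. A toy illustration of the mask arithmetic, with made-up mask values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int legacy = 0x7a;          /* hypothetical legacy/AMD _DSM function mask */
            unsigned int msft   = 0x66;          /* hypothetical Microsoft _DSM function mask */
            unsigned int dup    = legacy & msft; /* functions exposed by both UUIDs */

            legacy &= ~dup; /* keep only the Microsoft copy of the duplicates */
            printf("duplicates: 0x%x, remaining legacy mask: 0x%x\n", dup, legacy);
            return 0;
    }
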