Diffstat (limited to 'drivers/thunderbolt/domain.c')
-rw-r--r--	drivers/thunderbolt/domain.c	312
1 file changed, 190 insertions(+), 122 deletions(-)
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 7416bdbd8576..3ced37b4a869 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -7,14 +7,13 @@
  */
 
 #include <linux/device.h>
-#include <linux/dmar.h>
 #include <linux/idr.h>
-#include <linux/iommu.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/random.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
 
 #include "tb.h"
 
@@ -38,7 +37,7 @@ static bool match_service_id(const struct tb_service_id *id,
 			return false;
 	}
 
-	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
 		if (id->protocol_revision != svc->prtcrevs)
 			return false;
 	}
@@ -47,9 +46,9 @@
 }
 
 static const struct tb_service_id *__tb_service_match(struct device *dev,
-						       struct device_driver *drv)
+						       const struct device_driver *drv)
 {
-	struct tb_service_driver *driver;
+	const struct tb_service_driver *driver;
 	const struct tb_service_id *ids;
 	struct tb_service *svc;
 
@@ -57,7 +56,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
 	if (!svc)
 		return NULL;
 
-	driver = container_of(drv, struct tb_service_driver, driver);
+	driver = container_of_const(drv, struct tb_service_driver, driver);
 	if (!driver->id_table)
 		return NULL;
 
@@ -69,7 +68,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
 	return NULL;
 }
 
-static int tb_service_match(struct device *dev, struct device_driver *drv)
+static int tb_service_match(struct device *dev, const struct device_driver *drv)
 {
 	return !!__tb_service_match(dev, drv);
 }
@@ -86,7 +85,7 @@ static int tb_service_probe(struct device *dev)
 	return driver->probe(svc, id);
 }
 
-static int tb_service_remove(struct device *dev)
+static void tb_service_remove(struct device *dev)
 {
 	struct tb_service *svc = tb_to_service(dev);
 	struct tb_service_driver *driver;
@@ -94,8 +93,6 @@ static int tb_service_remove(struct device *dev)
 	driver = container_of(dev->driver, struct tb_service_driver, driver);
 	if (driver->remove)
 		driver->remove(svc);
-
-	return 0;
 }
 
 static void tb_service_shutdown(struct device *dev)
@@ -118,6 +115,7 @@ static const char * const tb_security_names[] = {
 	[TB_SECURITY_SECURE] = "secure",
 	[TB_SECURITY_DPONLY] = "dponly",
 	[TB_SECURITY_USBONLY] = "usbonly",
+	[TB_SECURITY_NOPCIE] = "nopcie",
 };
 
 static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
@@ -147,11 +145,9 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
 	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
 		if (!uuid_is_null(&uuids[i]))
-			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
-					&uuids[i]);
+			ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);
 
-		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s",
-				i < tb->nboot_acl - 1 ? "," : "\n");
+		ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
 	}
 
 out:
@@ -222,7 +218,7 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
 	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
 	if (!ret) {
 		/* Notify userspace about the change */
-		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+		tb_domain_event(tb, NULL);
 	}
 	mutex_unlock(&tb->lock);
 
@@ -238,17 +234,29 @@ err_free_str:
 }
 static DEVICE_ATTR_RW(boot_acl);
 
+static ssize_t deauthorization_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	const struct tb *tb = container_of(dev, struct tb, dev);
+	bool deauthorization = false;
+
+	/* Only meaningful if authorization is supported */
+	if (tb->security_level == TB_SECURITY_USER ||
+	    tb->security_level == TB_SECURITY_SECURE)
+		deauthorization = !!tb->cm_ops->disapprove_switch;
+
+	return sysfs_emit(buf, "%d\n", deauthorization);
+}
+static DEVICE_ATTR_RO(deauthorization);
+
 static ssize_t iommu_dma_protection_show(struct device *dev,
 					 struct device_attribute *attr,
 					 char *buf)
 {
-	/*
-	 * Kernel DMA protection is a feature where Thunderbolt security is
-	 * handled natively using IOMMU. It is enabled when IOMMU is
-	 * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
-	 */
-	return sprintf(buf, "%d\n",
-		       iommu_present(&pci_bus_type) && dmar_platform_optin());
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
 }
 static DEVICE_ATTR_RO(iommu_dma_protection);
 
@@ -261,12 +269,13 @@ static ssize_t security_show(struct device *dev, struct device_attribute *attr,
 	if (tb->security_level < ARRAY_SIZE(tb_security_names))
 		name = tb_security_names[tb->security_level];
 
-	return sprintf(buf, "%s\n", name);
+	return sysfs_emit(buf, "%s\n", name);
 }
 static DEVICE_ATTR_RO(security);
 
 static struct attribute *domain_attrs[] = {
 	&dev_attr_boot_acl.attr,
+	&dev_attr_deauthorization.attr,
 	&dev_attr_iommu_dma_protection.attr,
 	&dev_attr_security.attr,
 	NULL,
@@ -275,7 +284,7 @@ static struct attribute *domain_attrs[] = {
 static umode_t domain_attr_is_visible(struct kobject *kobj,
 				      struct attribute *attr, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct tb *tb = container_of(dev, struct tb, dev);
 
 	if (attr == &dev_attr_boot_acl.attr) {
@@ -289,7 +298,7 @@ static umode_t domain_attr_is_visible(struct kobject *kobj,
 	return attr->mode;
 }
 
-static struct attribute_group domain_attr_group = {
+static const struct attribute_group domain_attr_group = {
 	.is_visible = domain_attr_is_visible,
 	.attrs = domain_attrs,
 };
@@ -299,7 +308,7 @@ static const struct attribute_group *domain_attr_groups[] = {
 	NULL,
 };
 
-struct bus_type tb_bus_type = {
+const struct bus_type tb_bus_type = {
 	.name = "thunderbolt",
 	.match = tb_service_match,
 	.probe = tb_service_probe,
@@ -313,19 +322,44 @@ static void tb_domain_release(struct device *dev)
 
 	tb_ctl_free(tb->ctl);
 	destroy_workqueue(tb->wq);
-	ida_simple_remove(&tb_domain_ida, tb->index);
+	ida_free(&tb_domain_ida, tb->index);
 	mutex_destroy(&tb->lock);
 	kfree(tb);
 }
 
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
 	.name = "thunderbolt_domain",
 	.release = tb_domain_release,
 };
 
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size)
+{
+	struct tb *tb = data;
+
+	if (!tb->cm_ops->handle_event) {
+		tb_warn(tb, "domain does not have event handler\n");
+		return true;
+	}
+
+	switch (type) {
+	case TB_CFG_PKG_XDOMAIN_REQ:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+		if (tb_is_xdomain_enabled())
+			return tb_xdomain_handle_request(tb, type, buf, size);
+		break;
+
+	default:
+		tb->cm_ops->handle_event(tb, type, buf, size);
+	}
+
+	return true;
+}
+
 /**
  * tb_domain_alloc() - Allocate a domain
  * @nhi: Pointer to the host controller
+ * @timeout_msec: Control channel timeout for non-raw messages
  * @privsize: Size of the connection manager private data
  *
  * Allocates and initializes a new Thunderbolt domain. Connection
@@ -335,14 +369,14 @@ struct device_type tb_domain_type = {
  * Call tb_domain_put() to release the domain before it has been added
  * to the system.
  *
- * Return: allocated domain structure on %NULL in case of error
+ * Return: Pointer to &struct tb or %NULL in case of error.
  */
-struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
 {
 	struct tb *tb;
 
 	/*
-	 * Make sure the structure sizes map with that the hardware
+	 * Make sure the structure sizes map with what the hardware
 	 * expects because bit-fields are being used.
 	 */
 	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
@@ -356,7 +390,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 
 	tb->nhi = nhi;
 	mutex_init(&tb->lock);
-	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+	tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
 	if (tb->index < 0)
 		goto err_free;
 
@@ -364,6 +398,10 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 	if (!tb->wq)
 		goto err_remove_ida;
 
+	tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
+	if (!tb->ctl)
+		goto err_destroy_wq;
+
 	tb->dev.parent = &nhi->pdev->dev;
 	tb->dev.bus = &tb_bus_type;
 	tb->dev.type = &tb_domain_type;
@@ -373,48 +411,29 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 
 	return tb;
 
+err_destroy_wq:
+	destroy_workqueue(tb->wq);
 err_remove_ida:
-	ida_simple_remove(&tb_domain_ida, tb->index);
+	ida_free(&tb_domain_ida, tb->index);
 err_free:
 	kfree(tb);
 	return NULL;
 }
 
-static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
-			       const void *buf, size_t size)
-{
-	struct tb *tb = data;
-
-	if (!tb->cm_ops->handle_event) {
-		tb_warn(tb, "domain does not have event handler\n");
-		return true;
-	}
-
-	switch (type) {
-	case TB_CFG_PKG_XDOMAIN_REQ:
-	case TB_CFG_PKG_XDOMAIN_RESP:
-		return tb_xdomain_handle_request(tb, type, buf, size);
-
-	default:
-		tb->cm_ops->handle_event(tb, type, buf, size);
-	}
-
-	return true;
-}
-
 /**
  * tb_domain_add() - Add domain to the system
  * @tb: Domain to add
+ * @reset: Issue reset to the host router
  *
  * Starts the domain and adds it to the system. Hotplugging devices will
  * work after this has been returned successfully. In order to remove
  * and release the domain after this function has been called, call
  * tb_domain_remove().
  *
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
  */
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
 {
 	int ret;
 
@@ -422,13 +441,6 @@ int tb_domain_add(struct tb *tb)
 		return -EINVAL;
 
 	mutex_lock(&tb->lock);
-
-	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
-	if (!tb->ctl) {
-		ret = -ENOMEM;
-		goto err_unlock;
-	}
-
 	/*
 	 * tb_schedule_hotplug_handler may be called as soon as the config
 	 * channel is started. Thats why we have to hold the lock here.
@@ -441,13 +453,16 @@ int tb_domain_add(struct tb *tb)
 			goto err_ctl_stop;
 	}
 
+	tb_dbg(tb, "security level set to %s\n",
+	       tb_security_names[tb->security_level]);
+
 	ret = device_add(&tb->dev);
 	if (ret)
 		goto err_ctl_stop;
 
 	/* Start the domain */
 	if (tb->cm_ops->start) {
-		ret = tb->cm_ops->start(tb);
+		ret = tb->cm_ops->start(tb, reset);
 		if (ret)
 			goto err_domain_del;
 	}
@@ -455,6 +470,8 @@ int tb_domain_add(struct tb *tb)
 	/* This starts event processing */
 	mutex_unlock(&tb->lock);
 
+	device_init_wakeup(&tb->dev, true);
+
 	pm_runtime_no_callbacks(&tb->dev);
 	pm_runtime_set_active(&tb->dev);
 	pm_runtime_enable(&tb->dev);
@@ -468,7 +485,6 @@ err_domain_del:
 	device_del(&tb->dev);
 err_ctl_stop:
 	tb_ctl_stop(tb->ctl);
-err_unlock:
 	mutex_unlock(&tb->lock);
 
 	return ret;
@@ -491,6 +507,10 @@ void tb_domain_remove(struct tb *tb)
 	mutex_unlock(&tb->lock);
 
 	flush_workqueue(tb->wq);
+
+	if (tb->cm_ops->deinit)
+		tb->cm_ops->deinit(tb);
+
 	device_unregister(&tb->dev);
 }
 
@@ -499,6 +519,8 @@ void tb_domain_remove(struct tb *tb)
  * @tb: Domain to suspend
  *
  * Suspends all devices in the domain and stops the control channel.
+ *
+ * Return: %0 on success, negative errno otherwise.
  */
 int tb_domain_suspend_noirq(struct tb *tb)
 {
@@ -525,6 +547,8 @@ int tb_domain_suspend_noirq(struct tb *tb)
  *
  * Re-starts the control channel, and resumes all devices connected to
  * the domain.
+ *
+ * Return: %0 on success, negative errno otherwise.
  */
 int tb_domain_resume_noirq(struct tb *tb)
 {
@@ -544,6 +568,33 @@ int tb_domain_suspend(struct tb *tb)
 	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
 }
 
+int tb_domain_freeze_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->freeze_noirq)
+		ret = tb->cm_ops->freeze_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_thaw_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->thaw_noirq)
+		ret = tb->cm_ops->thaw_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
 void tb_domain_complete(struct tb *tb)
 {
 	if (tb->cm_ops->complete)
@@ -573,13 +624,32 @@ int tb_domain_runtime_resume(struct tb *tb)
 }
 
 /**
+ * tb_domain_disapprove_switch() - Disapprove switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to disapprove
+ *
+ * This will disconnect PCIe tunnel from parent to this @sw.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
+{
+	if (!tb->cm_ops->disapprove_switch)
+		return -EPERM;
+
+	return tb->cm_ops->disapprove_switch(tb, sw);
+}
+
+/**
  * tb_domain_approve_switch() - Approve switch
  * @tb: Domain the switch belongs to
  * @sw: Switch to approve
  *
  * This will approve switch by connection manager specific means. In
- * case of success the connection manager will create tunnels for all
- * supported protocols.
+ * case of success the connection manager will create PCIe tunnel from
+ * parent to @sw.
+ *
+ * Return: %0 on success, negative errno otherwise.
  */
 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
 {
@@ -645,8 +715,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
 	u8 response[TB_SWITCH_KEY_SIZE];
 	u8 hmac[TB_SWITCH_KEY_SIZE];
 	struct tb_switch *parent_sw;
-	struct crypto_shash *tfm;
-	struct shash_desc *shash;
 	int ret;
 
 	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
@@ -662,46 +730,15 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
 	if (ret)
 		return ret;
 
-	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
-	if (ret)
-		goto err_free_tfm;
-
-	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
-			GFP_KERNEL);
-	if (!shash) {
-		ret = -ENOMEM;
-		goto err_free_tfm;
-	}
-
-	shash->tfm = tfm;
-	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	memset(hmac, 0, sizeof(hmac));
-	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
-	if (ret)
-		goto err_free_shash;
+	static_assert(sizeof(hmac) == SHA256_DIGEST_SIZE);
+	hmac_sha256_usingrawkey(sw->key, TB_SWITCH_KEY_SIZE,
+				challenge, sizeof(challenge), hmac);
 
 	/* The returned HMAC must match the one we calculated */
-	if (memcmp(response, hmac, sizeof(hmac))) {
-		ret = -EKEYREJECTED;
-		goto err_free_shash;
-	}
-
-	crypto_free_shash(tfm);
-	kfree(shash);
+	if (crypto_memneq(response, hmac, sizeof(hmac)))
+		return -EKEYREJECTED;
 
 	return tb->cm_ops->approve_switch(tb, sw);
-
-err_free_shash:
-	kfree(shash);
-err_free_tfm:
-	crypto_free_shash(tfm);
-
-	return ret;
 }
 
 /**
@@ -711,7 +748,7 @@ err_free_tfm:
  * This needs to be called in preparation for NVM upgrade of the host
  * controller. Makes sure all PCIe paths are disconnected.
  *
- * Return %0 on success and negative errno in case of error.
+ * Return: %0 on success and negative errno in case of error.
  */
 int tb_domain_disconnect_pcie_paths(struct tb *tb)
 {
@@ -725,40 +762,58 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
 
 /**
  * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
  * @tb: Domain enabling the DMA paths
  * @xd: XDomain DMA paths are created to
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
  *
  * Calls connection manager specific method to enable DMA paths to the
  * XDomain in question.
  *
- * Return: 0% in case of success and negative errno otherwise. In
- * particular returns %-ENOTSUPP if the connection manager
- * implementation does not support XDomains.
+ * Return:
+ * * %0 - On success.
+ * * %-ENOTSUPP - If the connection manager implementation does not support
+ *   XDomains.
+ * * Negative errno - An error occurred.
 */
-int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				    int transmit_path, int transmit_ring,
+				    int receive_path, int receive_ring)
 {
 	if (!tb->cm_ops->approve_xdomain_paths)
 		return -ENOTSUPP;
 
-	return tb->cm_ops->approve_xdomain_paths(tb, xd);
+	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
+			transmit_ring, receive_path, receive_ring);
 }
 
 /**
  * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
  * @tb: Domain disabling the DMA paths
  * @xd: XDomain whose DMA paths are disconnected
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
  *
  * Calls connection manager specific method to disconnect DMA paths to
  * the XDomain in question.
 *
- * Return: 0% in case of success and negative errno otherwise. In
- * particular returns %-ENOTSUPP if the connection manager
- * implementation does not support XDomains.
+ * Return:
+ * * %0 - On success.
+ * * %-ENOTSUPP - If the connection manager implementation does not support
+ *   XDomains.
+ * * Negative errno - An error occurred.
 */
-int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				       int transmit_path, int transmit_ring,
+				       int receive_path, int receive_ring)
 {
 	if (!tb->cm_ops->disconnect_xdomain_paths)
 		return -ENOTSUPP;
 
-	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
+			transmit_ring, receive_path, receive_ring);
 }
 
@@ -769,7 +824,7 @@ static int disconnect_xdomain(struct device *dev, void *data)
 	xd = tb_to_xdomain(dev);
 	if (xd && xd->tb == tb)
-		ret = tb_xdomain_disable_paths(xd);
+		ret = tb_xdomain_disable_all_paths(xd);
 
 	return ret;
 }
@@ -799,12 +854,23 @@ int tb_domain_init(void)
 {
 	int ret;
 
+	tb_debugfs_init();
+	tb_acpi_init();
+
 	ret = tb_xdomain_init();
 	if (ret)
-		return ret;
+		goto err_acpi;
 	ret = bus_register(&tb_bus_type);
 	if (ret)
-		tb_xdomain_exit();
+		goto err_xdomain;
+
+	return 0;
+
+err_xdomain:
+	tb_xdomain_exit();
+err_acpi:
+	tb_acpi_exit();
+	tb_debugfs_exit();
 
 	return ret;
 }
@@ -813,6 +879,8 @@ void tb_domain_exit(void)
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
-	tb_switch_exit();
+	tb_nvm_exit();
 	tb_xdomain_exit();
+	tb_acpi_exit();
+	tb_debugfs_exit();
 }
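
For orientation, the bus plumbing touched at the top of this diff (tb_service_match(), tb_service_probe(), tb_service_remove()) is what Thunderbolt service drivers bind against. The sketch below is a minimal, hypothetical consumer of that bus, modeled loosely on the in-tree thunderbolt networking driver; the driver name and the "network" protocol key are illustrative only and are not part of this change.

/*
 * Hypothetical out-of-tree example, not part of this diff: a minimal
 * Thunderbolt service driver bound through tb_bus_type shown above.
 */
#include <linux/module.h>
#include <linux/thunderbolt.h>

static const struct tb_service_id example_ids[] = {
	{ TB_SERVICE("network", 1) },	/* match on protocol key and id; illustrative */
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, example_ids);

static int example_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	dev_info(&svc->dev, "service bound\n");
	return 0;
}

/* Note: ->remove() returns void on this bus, matching the tb_service_remove() change above */
static void example_remove(struct tb_service *svc)
{
	dev_info(&svc->dev, "service unbound\n");
}

static struct tb_service_driver example_driver = {
	.driver = {
		.name = "example-tb-service",
	},
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};

static int __init example_init(void)
{
	return tb_register_service_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	tb_unregister_service_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");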
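Similarly, the crypto rework in tb_domain_challenge_switch_key() replaces the crypto_shash boilerplate with the SHA-2 library helpers pulled in by the new includes. The check reduces to one HMAC computation plus a constant-time compare, roughly as in this hypothetical helper (function and buffer names are illustrative, assuming the hmac_sha256_usingrawkey() and crypto_memneq() helpers used in the diff):

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/sha2.h>	/* hmac_sha256_usingrawkey(), SHA256_DIGEST_SIZE */
#include <crypto/utils.h>	/* crypto_memneq() */

/*
 * Illustrative helper (not from the driver): verify that 'response'
 * equals HMAC-SHA256(key, challenge). crypto_memneq() keeps the
 * comparison constant-time so the check does not leak timing.
 */
static int example_verify_challenge(const u8 *key, size_t key_len,
				    const u8 *challenge, size_t challenge_len,
				    const u8 response[SHA256_DIGEST_SIZE])
{
	u8 hmac[SHA256_DIGEST_SIZE];

	hmac_sha256_usingrawkey(key, key_len, challenge, challenge_len, hmac);
	return crypto_memneq(response, hmac, sizeof(hmac)) ? -EKEYREJECTED : 0;
}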
