Diffstat (limited to 'drivers/accel/habanalabs/common/device.c')
| -rw-r--r-- | drivers/accel/habanalabs/common/device.c | 693 |
1 file changed, 481 insertions(+), 212 deletions(-)
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c index b97339d1f7c6..999c92d7036e 100644 --- a/drivers/accel/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -14,11 +14,14 @@ #include <linux/hwmon.h> #include <linux/vmalloc.h> +#include <drm/drm_accel.h> +#include <drm/drm_drv.h> + #include <trace/events/habanalabs.h> #define HL_RESET_DELAY_USEC 10000 /* 10ms */ -#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 5 +#define HL_DEVICE_RELEASE_WATCHDOG_TIMEOUT_SEC 30 enum dma_alloc_type { DMA_ALLOC_COHERENT, @@ -27,6 +30,8 @@ enum dma_alloc_type { #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788 +static void hl_device_heartbeat(struct work_struct *work); + /* * hl_set_dram_bar- sets the bar to allow later access to address * @@ -52,7 +57,8 @@ static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_regi if (is_power_of_2(prop->dram_pci_bar_size)) bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull); else - bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) * + bar_base_addr = region->region_base + + div64_u64((addr - region->region_base), prop->dram_pci_bar_size) * prop->dram_pci_bar_size; old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr); @@ -126,8 +132,8 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t } if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr)) - trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size, - caller); + trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle, + size, caller); return ptr; } @@ -148,7 +154,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c break; } - trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller); + trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller); } void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, @@ -185,7 +191,36 @@ void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void * hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, vaddr); } -int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir) +int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt, + enum dma_data_direction dir, const char *caller) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct scatterlist *sg; + int rc, i; + + rc = hdev->asic_funcs->dma_map_sgtable(hdev, sgt, dir); + if (rc) + return rc; + + if (!trace_habanalabs_dma_map_page_enabled()) + return 0; + + for_each_sgtable_dma_sg(sgt, sg, i) + trace_habanalabs_dma_map_page(&(hdev)->pdev->dev, + page_to_phys(sg_page(sg)), + sg->dma_address - prop->device_dma_offset_for_host_access, +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length, +#else + sg->length, +#endif + dir, caller); + + return 0; +} + +int hl_asic_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, + enum dma_data_direction dir) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct scatterlist *sg; @@ -203,7 +238,31 @@ int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_da return 0; } -void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir) +void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt, + enum dma_data_direction dir, const char *caller) +{ + struct 
asic_fixed_properties *prop = &hdev->asic_prop; + struct scatterlist *sg; + int i; + + hdev->asic_funcs->dma_unmap_sgtable(hdev, sgt, dir); + + if (trace_habanalabs_dma_unmap_page_enabled()) { + for_each_sgtable_dma_sg(sgt, sg, i) + trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev, + page_to_phys(sg_page(sg)), + sg->dma_address - prop->device_dma_offset_for_host_access, +#ifdef CONFIG_NEED_SG_DMA_LENGTH + sg->dma_length, +#else + sg->length, +#endif + dir, caller); + } +} + +void hl_asic_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, + enum dma_data_direction dir) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct scatterlist *sg; @@ -315,7 +374,9 @@ enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; - if (hdev->reset_info.in_reset) { + if (hdev->device_fini_pending) { + status = HL_DEVICE_STATUS_MALFUNCTION; + } else if (hdev->reset_info.in_reset) { if (hdev->reset_info.in_compute_reset) status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE; else @@ -343,9 +404,9 @@ bool hl_device_operational(struct hl_device *hdev, *status = current_status; switch (current_status) { + case HL_DEVICE_STATUS_MALFUNCTION: case HL_DEVICE_STATUS_IN_RESET: case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE: - case HL_DEVICE_STATUS_MALFUNCTION: case HL_DEVICE_STATUS_NEEDS_RESET: return false; case HL_DEVICE_STATUS_OPERATIONAL: @@ -381,16 +442,19 @@ static void print_idle_status_mask(struct hl_device *hdev, const char *message, u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE]) { if (idle_mask[3]) - dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n", - message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]); + dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n", + dev_name(&hdev->pdev->dev), message, + idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]); else if (idle_mask[2]) - dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n", - message, idle_mask[2], idle_mask[1], idle_mask[0]); + dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n", + dev_name(&hdev->pdev->dev), message, + idle_mask[2], idle_mask[1], idle_mask[0]); else if (idle_mask[1]) - dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n", - message, idle_mask[1], idle_mask[0]); + dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n", + dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]); else - dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]); + dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message, + idle_mask[0]); } static void hpriv_release(struct kref *ref) @@ -406,8 +470,6 @@ static void hpriv_release(struct kref *ref) hdev->asic_funcs->send_device_activity(hdev, false); - put_pid(hpriv->taskpid); - hl_debugfs_remove_file(hpriv); mutex_destroy(&hpriv->ctx_lock); @@ -424,7 +486,7 @@ static void hpriv_release(struct kref *ref) /* Check the device idle status and reset if not idle. * Skip it if already in reset, or if device is going to be reset in any case. 
*/ - if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm) + if (!hdev->reset_info.in_reset && !reset_device && !hdev->pldm) device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask, HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL); if (!device_is_idle) { @@ -446,14 +508,18 @@ static void hpriv_release(struct kref *ref) list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_list_lock); + put_pid(hpriv->taskpid); + if (reset_device) { hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE); } else { /* Scrubbing is handled within hl_device_reset(), so here need to do it directly */ int rc = hdev->asic_funcs->scrub_device_mem(hdev); - if (rc) + if (rc) { dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc); + hl_device_reset(hdev, HL_DRV_RESET_HARD); + } } /* Now we can mark the compute_ctx as not active. Even if a reset is running in a different @@ -485,7 +551,8 @@ int hl_hpriv_put(struct hl_fpriv *hpriv) return kref_put(&hpriv->refcount, hpriv_release); } -static void print_device_in_use_info(struct hl_device *hdev, const char *message) +static void print_device_in_use_info(struct hl_device *hdev, + struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message) { u32 active_cs_num, dmabuf_export_cnt; bool unknown_reason = true; @@ -509,6 +576,12 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message dmabuf_export_cnt); } + if (mm_fini_stats->n_busy_cb) { + unknown_reason = false; + offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]", + mm_fini_stats->n_busy_cb); + } + if (unknown_reason) scnprintf(buf + offset, size - offset, " [unknown reason]"); @@ -516,24 +589,21 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message } /* - * hl_device_release - release function for habanalabs device - * - * @inode: pointer to inode structure - * @filp: pointer to file structure + * hl_device_release() - release function for habanalabs device. + * @ddev: pointer to DRM device structure. + * @file: pointer to DRM file private data structure. * * Called when process closes an habanalabs device */ -static int hl_device_release(struct inode *inode, struct file *filp) +void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv) { - struct hl_fpriv *hpriv = filp->private_data; - struct hl_device *hdev = hpriv->hdev; - - filp->private_data = NULL; + struct hl_fpriv *hpriv = file_priv->driver_priv; + struct hl_device *hdev = to_hl_device(ddev); + struct hl_mem_mgr_fini_stats mm_fini_stats; if (!hdev) { pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n"); put_pid(hpriv->taskpid); - return 0; } hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr); @@ -541,18 +611,17 @@ static int hl_device_release(struct inode *inode, struct file *filp) /* Memory buffers might be still in use at this point and thus the handles IDR destruction * is postponed to hpriv_release(). 
*/ - hl_mem_mgr_fini(&hpriv->mem_mgr); + hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats); hdev->compute_ctx_in_release = 1; if (!hl_hpriv_put(hpriv)) { - print_device_in_use_info(hdev, "User process closed FD but device still in use"); + print_device_in_use_info(hdev, &mm_fini_stats, + "User process closed FD but device still in use"); hl_device_reset(hdev, HL_DRV_RESET_HARD); } hdev->last_open_session_duration_jif = jiffies - hdev->last_successful_open_jif; - - return 0; } static int hl_device_release_ctrl(struct inode *inode, struct file *filp) @@ -571,11 +640,6 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp) list_del(&hpriv->dev_node); mutex_unlock(&hdev->fpriv_ctrl_list_lock); out: - /* release the eventfd */ - if (hpriv->notifier_event.eventfd) - eventfd_ctx_put(hpriv->notifier_event.eventfd); - - mutex_destroy(&hpriv->notifier_event.lock); put_pid(hpriv->taskpid); kfree(hpriv); @@ -583,18 +647,8 @@ out: return 0; } -/* - * hl_mmap - mmap function for habanalabs device - * - * @*filp: pointer to file structure - * @*vma: pointer to vm_area_struct of the process - * - * Called when process does an mmap on habanalabs device. Call the relevant mmap - * function at the end of the common code. - */ -static int hl_mmap(struct file *filp, struct vm_area_struct *vma) +static int __hl_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) { - struct hl_fpriv *hpriv = filp->private_data; struct hl_device *hdev = hpriv->hdev; unsigned long vm_pgoff; @@ -617,14 +671,22 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } -static const struct file_operations hl_ops = { - .owner = THIS_MODULE, - .open = hl_device_open, - .release = hl_device_release, - .mmap = hl_mmap, - .unlocked_ioctl = hl_ioctl, - .compat_ioctl = hl_ioctl -}; +/* + * hl_mmap - mmap function for habanalabs device + * + * @*filp: pointer to file structure + * @*vma: pointer to vm_area_struct of the process + * + * Called when process does an mmap on habanalabs device. Call the relevant mmap + * function at the end of the common code. + */ +int hl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv = filp->private_data; + struct hl_fpriv *hpriv = file_priv->driver_priv; + + return __hl_mmap(hpriv, vma); +} static const struct file_operations hl_ctrl_ops = { .owner = THIS_MODULE, @@ -645,14 +707,14 @@ static void device_release_func(struct device *dev) * @hdev: pointer to habanalabs device structure * @class: pointer to the class object of the device * @minor: minor number of the specific device - * @fpos: file operations to install for this device + * @fops: file operations to install for this device * @name: name of the device as it will appear in the filesystem * @cdev: pointer to the char device object that will be initialized * @dev: pointer to the device object that will be initialized * * Initialize a cdev and a Linux device for habanalabs's device. 
*/ -static int device_init_cdev(struct hl_device *hdev, struct class *class, +static int device_init_cdev(struct hl_device *hdev, const struct class *class, int minor, const struct file_operations *fops, char *name, struct cdev *cdev, struct device **dev) @@ -676,23 +738,26 @@ static int device_init_cdev(struct hl_device *hdev, struct class *class, static int cdev_sysfs_debugfs_add(struct hl_device *hdev) { + const struct class *accel_class = hdev->drm.accel->kdev->class; + char name[32]; int rc; - rc = cdev_device_add(&hdev->cdev, hdev->dev); - if (rc) { - dev_err(hdev->dev, - "failed to add a char device to the system\n"); + hdev->cdev_idx = hdev->drm.accel->index; + + /* Initialize cdev and device structures for the control device */ + snprintf(name, sizeof(name), "accel_controlD%d", hdev->cdev_idx); + rc = device_init_cdev(hdev, accel_class, hdev->cdev_idx, &hl_ctrl_ops, name, + &hdev->cdev_ctrl, &hdev->dev_ctrl); + if (rc) return rc; - } rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl); if (rc) { - dev_err(hdev->dev, - "failed to add a control char device to the system\n"); - goto delete_cdev_device; + dev_err(hdev->dev_ctrl, + "failed to add an accel control char device to the system\n"); + goto free_ctrl_device; } - /* hl_sysfs_init() must be done after adding the device to the system */ rc = hl_sysfs_init(hdev); if (rc) { dev_err(hdev->dev, "failed to initialize sysfs\n"); @@ -707,23 +772,19 @@ static int cdev_sysfs_debugfs_add(struct hl_device *hdev) delete_ctrl_cdev_device: cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); -delete_cdev_device: - cdev_device_del(&hdev->cdev, hdev->dev); +free_ctrl_device: + put_device(hdev->dev_ctrl); return rc; } static void cdev_sysfs_debugfs_remove(struct hl_device *hdev) { if (!hdev->cdev_sysfs_debugfs_created) - goto put_devices; + return; - hl_debugfs_remove_device(hdev); hl_sysfs_fini(hdev); - cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); - cdev_device_del(&hdev->cdev, hdev->dev); -put_devices: - put_device(hdev->dev); + cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl); put_device(hdev->dev_ctrl); } @@ -756,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work) } queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work, - msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + secs_to_jiffies(HL_PENDING_RESET_PER_SEC)); } } @@ -808,6 +869,13 @@ static int device_early_init(struct hl_device *hdev) gaudi2_set_asic_funcs(hdev); strscpy(hdev->asic_name, "GAUDI2B", sizeof(hdev->asic_name)); break; + case ASIC_GAUDI2C: + gaudi2_set_asic_funcs(hdev); + strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name)); + break; + case ASIC_GAUDI2D: + gaudi2_set_asic_funcs(hdev); + strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name)); break; default: dev_err(hdev->dev, "Unrecognized ASIC type %d\n", @@ -897,6 +965,8 @@ static int device_early_init(struct hl_device *hdev) goto free_cb_mgr; } + INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat); + INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending); hdev->device_reset_work.hdev = hdev; hdev->device_fini_pending = 0; @@ -919,7 +989,7 @@ static int device_early_init(struct hl_device *hdev) return 0; free_cb_mgr: - hl_mem_mgr_fini(&hdev->kernel_mem_mgr); + hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL); hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr); free_chip_info: kfree(hdev->hl_chip_info); @@ -963,7 +1033,7 @@ static void device_early_fini(struct hl_device *hdev) mutex_destroy(&hdev->clk_throttling.lock); - 
hl_mem_mgr_fini(&hdev->kernel_mem_mgr); + hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL); hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr); kfree(hdev->hl_chip_info); @@ -986,14 +1056,46 @@ static void device_early_fini(struct hl_device *hdev) static bool is_pci_link_healthy(struct hl_device *hdev) { - u16 vendor_id; + u16 device_id; if (!hdev->pdev) return false; - pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id); + pci_read_config_word(hdev->pdev, PCI_DEVICE_ID, &device_id); + + return (device_id == hdev->pdev->device); +} + +static bool hl_device_eq_heartbeat_received(struct hl_device *hdev) +{ + struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info; + u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1; + struct asic_fixed_properties *prop = &hdev->asic_prop; + + if (!prop->cpucp_info.eq_health_check_supported) + return true; + + if (!hdev->eq_heartbeat_received) { + dev_err(hdev->dev, "EQ heartbeat event was not received!\n"); + + dev_err(hdev->dev, + "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n", + hdev->event_queue.ci, + heartbeat_debug_info->heartbeat_event_counter, + &hdev->heartbeat_debug_info.last_eq_heartbeat_ts, + hdev->kernel_queues[cpu_q_id].pi, + atomic_read(&hdev->kernel_queues[cpu_q_id].ci), + atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask, + &hdev->heartbeat_debug_info.last_pq_heartbeat_ts); + + hl_eq_dump(hdev, &hdev->event_queue); + + return false; + } + + hdev->eq_heartbeat_received = false; - return (vendor_id == PCI_VENDOR_ID_HABANALABS); + return true; } static void hl_device_heartbeat(struct work_struct *work) @@ -1003,10 +1105,16 @@ static void hl_device_heartbeat(struct work_struct *work) struct hl_info_fw_err_info info = {0}; u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE; - if (!hl_device_operational(hdev, NULL)) + /* Start heartbeat checks only after driver has enabled events from FW */ + if (!hl_device_operational(hdev, NULL) || !hdev->init_done) goto reschedule; - if (!hdev->asic_funcs->send_heartbeat(hdev)) + /* + * For EQ health check need to check if driver received the heartbeat eq event + * in order to validate the eq is working. + * Only if both the EQ is healthy and we managed to send the next heartbeat reschedule. 
+ */ + if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev))) goto reschedule; if (hl_device_operational(hdev, NULL)) @@ -1060,13 +1168,6 @@ static int device_late_init(struct hl_device *hdev) } hdev->high_pll = hdev->asic_prop.high_pll; - - if (hdev->heartbeat) { - INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat); - schedule_delayed_work(&hdev->work_heartbeat, - usecs_to_jiffies(HL_HEARTBEAT_PER_USEC)); - } - hdev->late_init_done = true; return 0; @@ -1083,9 +1184,6 @@ static void device_late_fini(struct hl_device *hdev) if (!hdev->late_init_done) return; - if (hdev->heartbeat) - cancel_delayed_work_sync(&hdev->work_heartbeat); - if (hdev->asic_funcs->late_fini) hdev->asic_funcs->late_fini(hdev); @@ -1186,8 +1284,12 @@ static void hl_abort_waiting_for_completions(struct hl_device *hdev) static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset, bool skip_wq_flush) { - if (hard_reset) + if (hard_reset) { + if (hdev->heartbeat) + cancel_delayed_work_sync(&hdev->work_heartbeat); + device_late_fini(hdev); + } /* * Halt the engines and disable interrupts so we won't get any more @@ -1302,18 +1404,18 @@ disable_device: static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev) { struct task_struct *task = NULL; - struct list_head *fd_list; - struct hl_fpriv *hpriv; - struct mutex *fd_lock; + struct list_head *hpriv_list; + struct hl_fpriv *hpriv; + struct mutex *hpriv_lock; u32 pending_cnt; - fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; - fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list; + hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; + hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list; /* Giving time for user to close FD, and for processes that are inside * hl_device_open to finish */ - if (!list_empty(fd_list)) + if (!list_empty(hpriv_list)) ssleep(1); if (timeout) { @@ -1329,12 +1431,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool } } - mutex_lock(fd_lock); + mutex_lock(hpriv_lock); /* This section must be protected because we are dereferencing * pointers that are freed if the process exits */ - list_for_each_entry(hpriv, fd_list, dev_node) { + list_for_each_entry(hpriv, hpriv_list, dev_node) { task = get_pid_task(hpriv->taskpid, PIDTYPE_PID); if (task) { dev_info(hdev->dev, "Killing user process pid=%d\n", @@ -1344,17 +1446,13 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool put_task_struct(task); } else { - /* - * If we got here, it means that process was killed from outside the driver - * right after it started looping on fd_list and before get_pid_task, thus - * we don't need to kill it. - */ dev_dbg(hdev->dev, - "Can't get task struct for user process, assuming process was killed from outside the driver\n"); + "Can't get task struct for user process %d, process was killed from outside the driver\n", + pid_nr(hpriv->taskpid)); } } - mutex_unlock(fd_lock); + mutex_unlock(hpriv_lock); /* * We killed the open users, but that doesn't mean they are closed. 
@@ -1366,7 +1464,7 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool */ wait_for_processes: - while ((!list_empty(fd_list)) && (pending_cnt)) { + while ((!list_empty(hpriv_list)) && (pending_cnt)) { dev_dbg(hdev->dev, "Waiting for all unmap operations to finish before hard reset\n"); @@ -1376,7 +1474,7 @@ wait_for_processes: } /* All processes exited successfully */ - if (list_empty(fd_list)) + if (list_empty(hpriv_list)) return 0; /* Give up waiting for processes to exit */ @@ -1390,17 +1488,17 @@ wait_for_processes: static void device_disable_open_processes(struct hl_device *hdev, bool control_dev) { - struct list_head *fd_list; + struct list_head *hpriv_list; struct hl_fpriv *hpriv; - struct mutex *fd_lock; + struct mutex *hpriv_lock; - fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; - fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list; + hpriv_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock; + hpriv_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list; - mutex_lock(fd_lock); - list_for_each_entry(hpriv, fd_list, dev_node) + mutex_lock(hpriv_lock); + list_for_each_entry(hpriv, hpriv_list, dev_node) hpriv->hdev = NULL; - mutex_unlock(fd_lock); + mutex_unlock(hpriv_lock); } static void send_disable_pci_access(struct hl_device *hdev, u32 flags) @@ -1419,15 +1517,14 @@ static void send_disable_pci_access(struct hl_device *hdev, u32 flags) * of heartbeat, the device CPU is marked as disable * so this message won't be sent */ - if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) { - dev_warn(hdev->dev, "Failed to disable FW's PCI access\n"); + if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) return; - } - /* verify that last EQs are handled before disabled is set */ + /* disable_irq also generates sync irq, this verifies that last EQs are handled + * before disabled is set. The IRQ will be enabled again in request_irq call. + */ if (hdev->cpu_queues_enable) - synchronize_irq(pci_irq_vector(hdev->pdev, - hdev->asic_prop.eq_interrupt_id)); + disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id)); } } @@ -1471,6 +1568,31 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags) } } +static void reset_heartbeat_debug_info(struct hl_device *hdev) +{ + hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0; + hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0; + hdev->heartbeat_debug_info.heartbeat_event_counter = 0; +} + +static inline void device_heartbeat_schedule(struct hl_device *hdev) +{ + if (!hdev->heartbeat) + return; + + reset_heartbeat_debug_info(hdev); + + /* + * Before scheduling the heartbeat driver will check if eq event has received. + * for the first schedule we need to set the indication as true then for the next + * one this indication will be true only if eq event was sent by FW. + */ + hdev->eq_heartbeat_received = true; + + schedule_delayed_work(&hdev->work_heartbeat, + usecs_to_jiffies(HL_HEARTBEAT_PER_USEC)); +} + /* * hl_device_reset - reset the device * @@ -1508,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags) from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR); reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release; + if (hdev->cpld_shutdown) { + dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! 
Device is NOT usable\n"); + return -EIO; + } + if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) { dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n"); return 0; @@ -1693,14 +1820,16 @@ kill_processes: hdev->device_cpu_disabled = false; hdev->reset_info.hard_reset_pending = false; + /* + * Put the device in an unusable state if there are 2 back to back resets due to + * fatal errors. + */ if (hdev->reset_info.reset_trigger_repeated && - (hdev->reset_info.prev_reset_trigger == - HL_DRV_RESET_FW_FATAL_ERR)) { - /* if there 2 back to back resets from FW, - * ensure driver puts the driver in a unusable state - */ + (hdev->reset_info.prev_reset_trigger == HL_DRV_RESET_FW_FATAL_ERR || + hdev->reset_info.prev_reset_trigger == + HL_DRV_RESET_HEARTBEAT)) { dev_crit(hdev->dev, - "%s Consecutive FW fatal errors received, stopping hard reset\n", + "%s Consecutive fatal errors, stopping hard reset\n", dev_name(&(hdev)->pdev->dev)); rc = -EIO; goto out_err; @@ -1838,6 +1967,8 @@ kill_processes: if (hard_reset) { hdev->reset_info.hard_reset_cnt++; + device_heartbeat_schedule(hdev); + /* After reset is done, we are ready to receive events from * the F/W. We can't do it before because we will ignore events * and if those events are fatal, we won't know about it and @@ -1916,7 +2047,16 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask) } ctx = hl_get_compute_ctx(hdev); - if (!ctx || !ctx->hpriv->notifier_event.eventfd) + if (!ctx) + goto device_reset; + + /* + * There is no point in postponing the reset if user is not registered for events. + * However if no eventfd_ctx exists but the device release watchdog is already scheduled, it + * just implies that user has unregistered as part of handling a previous event. In this + * case an immediate reset is not required. 
+ */ + if (!ctx->hpriv->notifier_event.eventfd && !hdev->reset_info.watchdog_active) goto device_reset; /* Schedule the device release watchdog work unless reset is already in progress or if the @@ -1928,14 +2068,16 @@ int hl_device_cond_reset(struct hl_device *hdev, u32 flags, u64 event_mask) goto device_reset; } - if (hdev->reset_info.watchdog_active) + if (hdev->reset_info.watchdog_active) { + hdev->device_release_watchdog_work.flags |= flags; goto out; + } hdev->device_release_watchdog_work.flags = flags; dev_dbg(hdev->dev, "Device is going to be hard-reset in %u sec unless being released\n", hdev->device_release_watchdog_timeout_sec); schedule_delayed_work(&hdev->device_release_watchdog_work.reset_work, - msecs_to_jiffies(hdev->device_release_watchdog_timeout_sec * 1000)); + secs_to_jiffies(hdev->device_release_watchdog_timeout_sec)); hdev->reset_info.watchdog_active = 1; out: spin_unlock(&hdev->reset_info.lock); @@ -1954,7 +2096,7 @@ device_reset: if (ctx) hl_ctx_put(ctx); - return hl_device_reset(hdev, flags); + return hl_device_reset(hdev, flags | HL_DRV_RESET_HARD); } static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask) @@ -1963,7 +2105,7 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 notifier_event->events_mask |= event_mask; if (notifier_event->eventfd) - eventfd_signal(notifier_event->eventfd, 1); + eventfd_signal(notifier_event->eventfd); mutex_unlock(¬ifier_event->lock); } @@ -1990,59 +2132,6 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask) hl_notifier_event_send(&hpriv->notifier_event, event_mask); mutex_unlock(&hdev->fpriv_list_lock); - - /* control device */ - mutex_lock(&hdev->fpriv_ctrl_list_lock); - - list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node) - hl_notifier_event_send(&hpriv->notifier_event, event_mask); - - mutex_unlock(&hdev->fpriv_ctrl_list_lock); -} - -static int create_cdev(struct hl_device *hdev) -{ - char *name; - int rc; - - hdev->cdev_idx = hdev->id / 2; - - name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx); - if (!name) { - rc = -ENOMEM; - goto out_err; - } - - /* Initialize cdev and device structures */ - rc = device_init_cdev(hdev, hdev->hclass, hdev->id, &hl_ops, name, - &hdev->cdev, &hdev->dev); - - kfree(name); - - if (rc) - goto out_err; - - name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx); - if (!name) { - rc = -ENOMEM; - goto free_dev; - } - - /* Initialize cdev and device structures for control device */ - rc = device_init_cdev(hdev, hdev->hclass, hdev->id_control, &hl_ctrl_ops, - name, &hdev->cdev_ctrl, &hdev->dev_ctrl); - - kfree(name); - - if (rc) - goto free_dev; - - return 0; - -free_dev: - put_device(hdev->dev); -out_err: - return rc; } /* @@ -2057,16 +2146,14 @@ out_err: int hl_device_init(struct hl_device *hdev) { int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt; + struct hl_ts_free_jobs *free_jobs_data; bool expose_interfaces_on_err = false; - - rc = create_cdev(hdev); - if (rc) - goto out_disabled; + void *p; /* Initialize ASIC function pointers and perform early init */ rc = device_early_init(hdev); if (rc) - goto free_dev; + goto out_disabled; user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count + hdev->asic_prop.user_interrupt_count; @@ -2078,15 +2165,43 @@ int hl_device_init(struct hl_device *hdev) rc = -ENOMEM; goto early_fini; } + + /* Timestamp records supported only if CQ supported in device */ + if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) { + for (i = 0 ; i < user_interrupt_cnt ; 
i++) { + p = vzalloc(TIMESTAMP_FREE_NODES_NUM * + sizeof(struct timestamp_reg_free_node)); + if (!p) { + rc = -ENOMEM; + goto free_usr_intr_mem; + } + free_jobs_data = &hdev->user_interrupt[i].ts_free_jobs_data; + free_jobs_data->free_nodes_pool = p; + free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM; + free_jobs_data->next_avail_free_node_idx = 0; + } + } + } + + free_jobs_data = &hdev->common_user_cq_interrupt.ts_free_jobs_data; + p = vzalloc(TIMESTAMP_FREE_NODES_NUM * + sizeof(struct timestamp_reg_free_node)); + if (!p) { + rc = -ENOMEM; + goto free_usr_intr_mem; } + free_jobs_data->free_nodes_pool = p; + free_jobs_data->free_nodes_length = TIMESTAMP_FREE_NODES_NUM; + free_jobs_data->next_avail_free_node_idx = 0; + /* * Start calling ASIC initialization. First S/W then H/W and finally * late init */ rc = hdev->asic_funcs->sw_init(hdev); if (rc) - goto free_usr_intr_mem; + goto free_common_usr_intr_mem; /* initialize completion structure for multi CS wait */ @@ -2253,6 +2368,14 @@ int hl_device_init(struct hl_device *hdev) * From here there is no need to expose them in case of an error. */ expose_interfaces_on_err = false; + + rc = drm_dev_register(&hdev->drm, 0); + if (rc) { + dev_err(hdev->dev, "Failed to register DRM device, rc %d\n", rc); + rc = 0; + goto out_disabled; + } + rc = cdev_sysfs_debugfs_add(hdev); if (rc) { dev_err(hdev->dev, "Failed to add char devices and sysfs/debugfs files\n"); @@ -2280,12 +2403,16 @@ int hl_device_init(struct hl_device *hdev) goto out_disabled; } + /* Scheduling the EQ heartbeat thread must come after driver is done with all + * initializations, as we want to make sure the FW gets enough time to be prepared + * to respond to heartbeat packets. + */ + device_heartbeat_schedule(hdev); + dev_notice(hdev->dev, "Successfully added device %s to habanalabs driver\n", dev_name(&(hdev)->pdev->dev)); - hdev->init_done = true; - /* After initialization is done, we are ready to receive events from * the F/W. We can't do it before because we will ignore events and if * those events are fatal, we won't know about it and the device will @@ -2293,6 +2420,8 @@ int hl_device_init(struct hl_device *hdev) */ hdev->asic_funcs->enable_events_from_fw(hdev); + hdev->init_done = true; + return 0; cb_pool_fini: @@ -2317,19 +2446,27 @@ hw_queues_destroy: hl_hw_queues_destroy(hdev); sw_fini: hdev->asic_funcs->sw_fini(hdev); +free_common_usr_intr_mem: + vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool); free_usr_intr_mem: - kfree(hdev->user_interrupt); + if (user_interrupt_cnt) { + for (i = 0 ; i < user_interrupt_cnt ; i++) { + if (!hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool) + break; + vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool); + } + kfree(hdev->user_interrupt); + } early_fini: device_early_fini(hdev); -free_dev: - put_device(hdev->dev_ctrl); - put_device(hdev->dev); out_disabled: hdev->disabled = true; - if (expose_interfaces_on_err) + if (expose_interfaces_on_err) { + drm_dev_register(&hdev->drm, 0); cdev_sysfs_debugfs_add(hdev); - dev_err(&hdev->pdev->dev, - "Failed to initialize hl%d. Device %s is NOT usable !\n", + } + + pr_err("Failed to initialize accel%d. 
Device %s is NOT usable!\n", hdev->cdev_idx, dev_name(&hdev->pdev->dev)); return rc; @@ -2344,12 +2481,13 @@ out_disabled: */ void hl_device_fini(struct hl_device *hdev) { + u32 user_interrupt_cnt; bool device_in_reset; ktime_t timeout; u64 reset_sec; int i, rc; - dev_info(hdev->dev, "Removing device\n"); + dev_info(hdev->dev, "Removing device %s\n", dev_name(&(hdev)->pdev->dev)); hdev->device_fini_pending = 1; flush_delayed_work(&hdev->device_reset_work.reset_work); @@ -2425,14 +2563,14 @@ void hl_device_fini(struct hl_device *hdev) hdev->process_kill_trial_cnt = 0; rc = device_kill_open_processes(hdev, HL_WAIT_PROCESS_KILL_ON_DEVICE_FINI, false); if (rc) { - dev_crit(hdev->dev, "Failed to kill all open processes\n"); + dev_crit(hdev->dev, "Failed to kill all open processes (%d)\n", rc); device_disable_open_processes(hdev, false); } hdev->process_kill_trial_cnt = 0; rc = device_kill_open_processes(hdev, 0, true); if (rc) { - dev_crit(hdev->dev, "Failed to kill all control device open processes\n"); + dev_crit(hdev->dev, "Failed to kill all control device open processes (%d)\n", rc); device_disable_open_processes(hdev, true); } @@ -2443,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev) if (rc) dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc); + /* Reset the H/W (if it accessible). It will be in idle state after this returns */ + if (!hdev->cpld_shutdown) { + rc = hdev->asic_funcs->hw_fini(hdev, true, false); + if (rc) + dev_err(hdev->dev, + "hw_fini failed in device fini while removing device %d\n", rc); + } + hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE; /* Release kernel context */ @@ -2464,7 +2610,20 @@ void hl_device_fini(struct hl_device *hdev) for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) hl_cq_fini(hdev, &hdev->completion_queue[i]); kfree(hdev->completion_queue); - kfree(hdev->user_interrupt); + + user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count + + hdev->asic_prop.user_interrupt_count; + + if (user_interrupt_cnt) { + if (hdev->asic_prop.first_available_cq[0] != USHRT_MAX) { + for (i = 0 ; i < user_interrupt_cnt ; i++) + vfree(hdev->user_interrupt[i].ts_free_jobs_data.free_nodes_pool); + } + + kfree(hdev->user_interrupt); + } + + vfree(hdev->common_user_cq_interrupt.ts_free_jobs_data.free_nodes_pool); hl_hw_queues_destroy(hdev); @@ -2475,6 +2634,7 @@ void hl_device_fini(struct hl_device *hdev) /* Hide devices and sysfs/debugfs files from user */ cdev_sysfs_debugfs_remove(hdev); + drm_dev_unregister(&hdev->drm); hl_debugfs_device_fini(hdev); @@ -2499,7 +2659,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg) u32 val = readl(hdev->rmmio + reg); if (unlikely(trace_habanalabs_rreg32_enabled())) - trace_habanalabs_rreg32(hdev->dev, reg, val); + trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val); return val; } @@ -2517,7 +2677,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg) inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) { if (unlikely(trace_habanalabs_wreg32_enabled())) - trace_habanalabs_wreg32(hdev->dev, reg, val); + trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val); writel(val, hdev->rmmio + reg); } @@ -2690,6 +2850,20 @@ void hl_handle_fw_err(struct hl_device *hdev, struct hl_info_fw_err_info *info) *info->event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR; } +void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_count) +{ + struct engine_err_info *info = &hdev->captured_err_info.engine_err; + + /* Capture only the first engine error */ + if 
(atomic_cmpxchg(&info->event_detected, 0, 1)) + return; + + info->event.timestamp = ktime_to_ns(ktime_get()); + info->event.engine_id = engine_id; + info->event.error_count = error_count; + info->event_info_available = true; +} + void hl_enable_err_info_capture(struct hl_error_info *captured_err_info) { vfree(captured_err_info->page_fault_info.user_mappings); @@ -2697,3 +2871,98 @@ void hl_enable_err_info_capture(struct hl_error_info *captured_err_info) atomic_set(&captured_err_info->cs_timeout.write_enable, 1); captured_err_info->undef_opcode.write_enable = true; } + +void hl_init_cpu_for_irq(struct hl_device *hdev) +{ +#ifdef CONFIG_NUMA + struct cpumask *available_mask = &hdev->irq_affinity_mask; + int numa_node = hdev->pdev->dev.numa_node, i; + static struct cpumask cpu_mask; + + if (numa_node < 0) + return; + + if (!cpumask_and(&cpu_mask, cpumask_of_node(numa_node), cpu_online_mask)) { + dev_err(hdev->dev, "No available affinities in current numa node\n"); + return; + } + + /* Remove HT siblings */ + for_each_cpu(i, &cpu_mask) + cpumask_set_cpu(cpumask_first(topology_sibling_cpumask(i)), available_mask); +#endif +} + +void hl_set_irq_affinity(struct hl_device *hdev, int irq) +{ + if (cpumask_empty(&hdev->irq_affinity_mask)) { + dev_dbg(hdev->dev, "affinity mask is empty\n"); + return; + } + + if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask)) + dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq); +} + +void hl_eq_heartbeat_event_handle(struct hl_device *hdev) +{ + hdev->heartbeat_debug_info.heartbeat_event_counter++; + hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds(); + hdev->eq_heartbeat_received = true; +} + +void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask) +{ + struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling; + ktime_t zero_time = ktime_set(0, 0); + + mutex_lock(&clk_throttle->lock); + + switch (event_type) { + case EQ_EVENT_POWER_EVT_START: + clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER; + clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER; + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; + dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n"); + break; + + case EQ_EVENT_POWER_EVT_END: + clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER; + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); + dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n"); + break; + + case EQ_EVENT_THERMAL_EVT_START: + clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL; + clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL; + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; + dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n"); + break; + + case EQ_EVENT_THERMAL_EVT_END: + clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL; + clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); + *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; + dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n"); + break; + + default: + dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type); + break; + } + + mutex_unlock(&clk_throttle->lock); +} + +void hl_eq_cpld_shutdown_event_handle(struct 
hl_device *hdev, u16 event_id, u64 *event_mask) +{ + hl_handle_critical_hw_err(hdev, event_id, event_mask); + *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE; + + /* Avoid any new accesses to the H/W */ + hdev->disabled = true; + hdev->cpld_shutdown = true; +}
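
Note on the hl_set_dram_bar() hunk: for a non-power-of-2 DRAM PCI BAR, the base is now rounded down relative to region_base instead of to an absolute multiple of the BAR size, so the window is always region_base plus a whole number of BAR-sized steps. A small user-space sketch of just that arithmetic (the region base, the 6 GB BAR size and the address are made-up example values, not taken from the patch):

        /* Illustrative model, not driver code. */
        #include <stdint.h>
        #include <stdio.h>

        static uint64_t bar_base(uint64_t addr, uint64_t region_base, uint64_t bar_size)
        {
                /* mirrors: region_base + div64_u64(addr - region_base, bar_size) * bar_size */
                return region_base + ((addr - region_base) / bar_size) * bar_size;
        }

        int main(void)
        {
                uint64_t region_base = 0x1000000000ULL;  /* hypothetical DRAM region base */
                uint64_t bar_size    = 0x180000000ULL;   /* 6 GB: not a power of 2 */
                uint64_t addr        = region_base + 0x200000000ULL;

                printf("bar base for %#llx -> %#llx\n",
                       (unsigned long long)addr,
                       (unsigned long long)bar_base(addr, region_base, bar_size));
                return 0;
        }

With these numbers bar_base() returns region_base + 0x180000000, i.e. the start of the second 6 GB window of the region, which is guaranteed to cover addr.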
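
Note on the new heartbeat flow: hl_device_heartbeat() now reschedules itself only if the firmware's EQ heartbeat event was received since the previous tick, and device_heartbeat_schedule() pre-arms eq_heartbeat_received so the very first tick after (re)initialization cannot trip a false failure. A minimal user-space sketch of that flag handshake (the harness and main() are hypothetical; only the flow mirrors the patch):

        /* Illustrative model, not driver code. */
        #include <stdbool.h>
        #include <stdio.h>

        struct dev_state {
                bool eq_heartbeat_received;     /* set by the FW EQ heartbeat event */
                unsigned int hb_event_counter;  /* mirrors heartbeat_event_counter */
        };

        /* device_heartbeat_schedule(): arm the check optimistically for the first tick */
        static void heartbeat_schedule(struct dev_state *d)
        {
                d->eq_heartbeat_received = true;
                d->hb_event_counter = 0;
        }

        /* hl_eq_heartbeat_event_handle(): FW confirmed it is alive since the last tick */
        static void eq_heartbeat_event(struct dev_state *d)
        {
                d->hb_event_counter++;
                d->eq_heartbeat_received = true;
        }

        /* hl_device_heartbeat() work: returns false where the driver would reset */
        static bool heartbeat_tick(struct dev_state *d)
        {
                if (!d->eq_heartbeat_received)
                        return false;           /* EQ heartbeat missed */
                d->eq_heartbeat_received = false; /* must be re-armed by the next EQ event */
                return true;
        }

        int main(void)
        {
                struct dev_state d;

                heartbeat_schedule(&d);
                eq_heartbeat_event(&d);
                printf("tick 1: %s\n", heartbeat_tick(&d) ? "ok" : "reset");
                /* no EQ event before the next tick */
                printf("tick 2: %s\n", heartbeat_tick(&d) ? "ok" : "reset");
                return 0;
        }

The first tick passes because the EQ event re-armed the flag; the second tick fails because nothing re-armed it, which in the driver leads to a device reset.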
