Diffstat (limited to 'drivers/dma/idxd/idxd.h')
-rw-r--r--	drivers/dma/idxd/idxd.h	| 247
1 file changed, 211 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 1196ab342f01..74e6695881e6 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -10,15 +10,15 @@
 #include <linux/cdev.h>
 #include <linux/idr.h>
 #include <linux/pci.h>
-#include <linux/ioasid.h>
 #include <linux/bitmap.h>
 #include <linux/perf_event.h>
+#include <linux/iommu.h>
+#include <linux/crypto.h>
 #include <uapi/linux/idxd.h>
 #include "registers.h"
 
 #define IDXD_DRIVER_VERSION	"1.00"
 
-extern struct kmem_cache *idxd_desc_pool;
 extern bool tc_override;
 
 struct idxd_wq;
@@ -32,6 +32,7 @@ enum idxd_dev_type {
 	IDXD_DEV_GROUP,
 	IDXD_DEV_ENGINE,
 	IDXD_DEV_CDEV,
+	IDXD_DEV_CDEV_FILE,
 	IDXD_DEV_MAX_TYPE,
 };
 
@@ -56,11 +57,23 @@ enum idxd_type {
 #define IDXD_ENQCMDS_RETRIES		32
 #define IDXD_ENQCMDS_MAX_RETRIES	64
 
+enum idxd_complete_type {
+	IDXD_COMPLETE_NORMAL = 0,
+	IDXD_COMPLETE_ABORT,
+	IDXD_COMPLETE_DEV_FAIL,
+};
+
+struct idxd_desc;
+
 struct idxd_device_driver {
 	const char *name;
 	enum idxd_dev_type *type;
 	int (*probe)(struct idxd_dev *idxd_dev);
 	void (*remove)(struct idxd_dev *idxd_dev);
+	void (*desc_complete)(struct idxd_desc *desc,
+			      enum idxd_complete_type comp_type,
+			      bool free_desc,
+			      void *ctx, u32 *status);
 	struct device_driver drv;
 };
 
@@ -110,7 +123,6 @@ struct idxd_pmu {
 	struct pmu pmu;
 	char name[IDXD_NAME_SIZE];
-	int cpu;
 
 	int n_counters;
 	int counter_width;
 
@@ -121,12 +133,16 @@ struct idxd_pmu {
 
 	unsigned long supported_filters;
 	int n_filters;
-
-	struct hlist_node cpuhp_node;
 };
 
 #define IDXD_MAX_PRIORITY	0xf
 
+enum {
+	COUNTER_FAULTS = 0,
+	COUNTER_FAULT_FAILS,
+	COUNTER_MAX
+};
+
 enum idxd_wq_state {
 	IDXD_WQ_DISABLED = 0,
 	IDXD_WQ_ENABLED,
@@ -136,6 +152,7 @@ enum idxd_wq_flag {
 	WQ_FLAG_DEDICATED = 0,
 	WQ_FLAG_BLOCK_ON_FAULT,
 	WQ_FLAG_ATS_DISABLE,
+	WQ_FLAG_PRS_DISABLE,
 };
 
 enum idxd_wq_type {
@@ -151,7 +168,8 @@ struct idxd_cdev {
 	int minor;
 };
 
-#define IDXD_ALLOCATED_BATCH_SIZE	128U
+#define DRIVER_NAME_SIZE		128
+
 #define WQ_NAME_SIZE		1024
 #define WQ_TYPE_SIZE		10
 
@@ -164,12 +182,6 @@ enum idxd_op_type {
 	IDXD_OP_NONBLOCK = 1,
 };
 
-enum idxd_complete_type {
-	IDXD_COMPLETE_NORMAL = 0,
-	IDXD_COMPLETE_ABORT,
-	IDXD_COMPLETE_DEV_FAIL,
-};
-
 struct idxd_dma_chan {
 	struct dma_chan chan;
 	struct idxd_wq *wq;
 };
@@ -185,6 +197,7 @@ struct idxd_wq {
 	struct idxd_dev idxd_dev;
 	struct idxd_cdev *idxd_cdev;
 	struct wait_queue_head err_queue;
+	struct workqueue_struct *wq;
 	struct idxd_device *idxd;
 	int id;
 	struct idxd_irq_entry ie;
@@ -214,6 +227,12 @@ struct idxd_wq {
 	char name[WQ_NAME_SIZE + 1];
 	u64 max_xfer_bytes;
 	u32 max_batch_size;
+
+	/* Lock to protect upasid_xa access. */
+	struct mutex uc_lock;
+	struct xarray upasid_xa;
+
+	char driver_name[DRIVER_NAME_SIZE + 1];
 };
 
 struct idxd_engine {
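The upasid_xa xarray added to struct idxd_wq above maps a user PASID to its per-client context, and the comment requires uc_lock to be held around accesses. A minimal sketch of a store helper under that locking rule (illustrative only; my_store_user_ctx and struct my_user_ctx are hypothetical, not part of this patch):

static int my_store_user_ctx(struct idxd_wq *wq, u32 pasid,
			     struct my_user_ctx *uctx)
{
	int ret;

	/* uc_lock serializes all upasid_xa updates, per the comment above */
	mutex_lock(&wq->uc_lock);
	ret = xa_err(xa_store(&wq->upasid_xa, pasid, uctx, GFP_KERNEL));
	mutex_unlock(&wq->uc_lock);

	return ret;
}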
@@ -232,6 +251,7 @@ struct idxd_hw {
 	union engine_cap_reg engine_cap;
 	struct opcap opcap;
 	u32 cmd_cap;
+	union iaa_cap_reg iaa_cap;
 };
 
 enum idxd_device_state {
@@ -252,12 +272,41 @@ struct idxd_dma_dev {
 	struct dma_device dma;
 };
 
+typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd);
+
 struct idxd_driver_data {
 	const char *name_prefix;
 	enum idxd_type type;
-	struct device_type *dev_type;
+	const struct device_type *dev_type;
 	int compl_size;
 	int align;
+	int evl_cr_off;
+	int cr_status_off;
+	int cr_result_off;
+	bool user_submission_safe;
+	load_device_defaults_fn_t load_device_defaults;
+};
+
+struct idxd_evl {
+	/* Lock to protect event log access. */
+	struct mutex lock;
+	void *log;
+	dma_addr_t dma;
+	/* Total size of event log = number of entries * entry size. */
+	unsigned int log_size;
+	/* The number of entries in the event log. */
+	u16 size;
+	unsigned long *bmap;
+	bool batch_fail[IDXD_MAX_BATCH_IDENT];
+};
+
+struct idxd_evl_fault {
+	struct work_struct work;
+	struct idxd_wq *wq;
+	u8 status;
+
+	/* make this last member always */
+	struct __evl_entry entry[];
 };
 
 struct idxd_device {
@@ -316,6 +365,43 @@ struct idxd_device {
 	struct idxd_pmu *idxd_pmu;
 
 	unsigned long *opcap_bmap;
+	struct idxd_evl *evl;
+	struct kmem_cache *evl_cache;
+
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_evl_file;
+
+	bool user_submission_safe;
+
+	struct idxd_saved_states *idxd_saved;
+};
+
+struct idxd_saved_states {
+	struct idxd_device saved_idxd;
+	struct idxd_evl saved_evl;
+	struct idxd_engine **saved_engines;
+	struct idxd_wq **saved_wqs;
+	struct idxd_group **saved_groups;
+	unsigned long *saved_wq_enable_map;
+};
+
+static inline unsigned int evl_ent_size(struct idxd_device *idxd)
+{
+	return idxd->hw.gen_cap.evl_support ?
+	       (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
+}
+
+static inline unsigned int evl_size(struct idxd_device *idxd)
+{
+	return idxd->evl->size * evl_ent_size(idxd);
+}
+
+struct crypto_ctx {
+	struct acomp_req *req;
+	struct crypto_tfm *tfm;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	bool compress;
 };
 
 /* IDXD software descriptor */
@@ -330,7 +416,10 @@ struct idxd_desc {
 		struct iax_completion_record *iax_completion;
 	};
 	dma_addr_t compl_dma;
-	struct dma_async_tx_descriptor txd;
+	union {
+		struct dma_async_tx_descriptor txd;
+		struct crypto_ctx crypto;
+	};
 	struct llist_node llnode;
 	struct list_head list;
 	int id;
@@ -351,11 +440,21 @@ enum idxd_completion_status {
 #define engine_confdev(engine) &engine->idxd_dev.conf_dev
 #define group_confdev(group) &group->idxd_dev.conf_dev
 #define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
+#define user_ctx_dev(ctx) (&(ctx)->idxd_dev.conf_dev)
 
 #define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
 #define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
 #define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
 
+static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
+{
+	struct device *dev = wq_confdev(wq);
+	struct idxd_device_driver *idxd_drv =
+		container_of(dev->driver, struct idxd_device_driver, drv);
+
+	return idxd_drv;
+}
+
 static inline struct idxd_device *confdev_to_idxd(struct device *dev)
 {
 	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
@@ -416,15 +515,24 @@ static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
 	return container_of(ie, struct idxd_device, ie);
 }
 
-extern struct bus_type dsa_bus_type;
+static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
+{
+	union gencfg_reg reg;
+
+	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+	reg.user_int_en = enable;
+	iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+}
+
+extern const struct bus_type dsa_bus_type;
 
 extern bool support_enqcmd;
 extern struct ida idxd_ida;
-extern struct device_type dsa_device_type;
-extern struct device_type iax_device_type;
-extern struct device_type idxd_wq_device_type;
-extern struct device_type idxd_engine_device_type;
-extern struct device_type idxd_group_device_type;
+extern const struct device_type dsa_device_type;
+extern const struct device_type iax_device_type;
+extern const struct device_type idxd_wq_device_type;
+extern const struct device_type idxd_engine_device_type;
+extern const struct device_type idxd_group_device_type;
 
 static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
 {
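A worked reading of the evl_ent_size()/evl_size() helpers added above: gen_cap.evl_support encodes the event log entry size as a power-of-two multiple of 32 bytes, with 0 meaning no event log support. For example, evl_support = 1 gives 32 * (1 << 1) = 64-byte entries, so an event log configured with idxd->evl->size = 256 entries occupies 256 * 64 bytes = 16 KB of log_size.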
@@ -548,6 +656,56 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
 	return wq->client_count;
 };
 
+static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
+{
+	dev_set_drvdata(wq_confdev(wq), private);
+}
+
+static inline void *idxd_wq_get_private(struct idxd_wq *wq)
+{
+	return dev_get_drvdata(wq_confdev(wq));
+}
+
+/*
+ * Intel IAA does not support batch processing.
+ * The max batch size of device, max batch size of wq and
+ * max batch shift of wqcfg should be always 0 on IAA.
+ */
+static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
+					   u32 max_batch_size)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		idxd->max_batch_size = 0;
+	else
+		idxd->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
+					      u32 max_batch_size)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		wq->max_batch_size = 0;
+	else
+		wq->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
+						  u32 max_batch_shift)
+{
+	if (idxd_type == IDXD_TYPE_IAX)
+		wqcfg->max_batch_shift = 0;
+	else
+		wqcfg->max_batch_shift = max_batch_shift;
+}
+
+static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
+{
+	return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0);
+}
+
+#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*")
+#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d"
+
 int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
 					struct module *module, const char *mod_name);
 #define idxd_driver_register(driver) \
@@ -558,14 +716,30 @@ void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
 
 #define module_idxd_driver(__idxd_driver) \
 	module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
 
-int idxd_register_bus_type(void);
-void idxd_unregister_bus_type(void);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+void idxd_dma_complete_txd(struct idxd_desc *desc,
+			   enum idxd_complete_type comp_type,
+			   bool free_desc, void *ctx, u32 *status);
+
+static inline void idxd_desc_complete(struct idxd_desc *desc,
+				      enum idxd_complete_type comp_type,
+				      bool free_desc)
+{
+	struct idxd_device_driver *drv;
+	u32 status;
+
+	drv = wq_to_idxd_drv(desc->wq);
+	if (drv->desc_complete)
+		drv->desc_complete(desc, comp_type, free_desc,
+				   &desc->txd, &status);
+}
+
 int idxd_register_devices(struct idxd_device *idxd);
 void idxd_unregister_devices(struct idxd_device *idxd);
-int idxd_register_driver(void);
-void idxd_unregister_driver(void);
 void idxd_wqs_quiesce(struct idxd_device *idxd);
 bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
+void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
+int idxd_load_iaa_device_defaults(struct idxd_device *idxd);
 
 /* device interrupt control */
 irqreturn_t idxd_misc_thread(int vec, void *data);
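The desc_complete() op together with the idxd_desc_complete() dispatcher above decouples completion handling from the dmaengine-specific idxd_dma_complete_txd(), so other sub-drivers (for instance a user of the new crypto union in struct idxd_desc) can supply their own handler. A minimal sketch of the wiring (all my_* names are hypothetical; the IDXD_DEV_NONE sentinel convention is assumed to match the existing sub-drivers):

static int my_wq_probe(struct idxd_dev *idxd_dev) { return 0; }
static void my_wq_remove(struct idxd_dev *idxd_dev) { }

static void my_desc_complete(struct idxd_desc *desc,
			     enum idxd_complete_type comp_type,
			     bool free_desc, void *ctx, u32 *status)
{
	/* a dmaengine-backed driver would route this to idxd_dma_complete_txd() */
	idxd_dma_complete_txd(desc, comp_type, free_desc, ctx, status);
}

static enum idxd_dev_type my_dev_types[] = { IDXD_DEV_WQ, IDXD_DEV_NONE };

static struct idxd_device_driver my_idxd_drv = {
	.name		= "my_drv",
	.type		= my_dev_types,
	.probe		= my_wq_probe,
	.remove		= my_wq_remove,
	.desc_complete	= my_desc_complete,
};

Note that idxd_desc_complete() passes &desc->txd as the ctx argument, so a handler that needs the dmaengine descriptor receives it without reaching into the union itself.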
@@ -574,12 +748,12 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd);
 void idxd_unmask_error_interrupts(struct idxd_device *idxd);
 
 /* device control */
-int idxd_register_idxd_drv(void);
-void idxd_unregister_idxd_drv(void);
 int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
+int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
+			 const struct pci_device_id *id);
 void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
-int drv_enable_wq(struct idxd_wq *wq);
-void drv_disable_wq(struct idxd_wq *wq);
+int idxd_drv_enable_wq(struct idxd_wq *wq);
+void idxd_drv_disable_wq(struct idxd_wq *wq);
 int idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
@@ -614,15 +788,11 @@ int idxd_wq_request_irq(struct idxd_wq *wq);
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
 struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
-void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
 int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
 
 /* dmaengine */
 int idxd_register_dma_device(struct idxd_device *idxd);
 void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
-void idxd_dma_complete_txd(struct idxd_desc *desc,
-			   enum idxd_complete_type comp_type, bool free_desc);
 
 /* cdev */
 int idxd_cdev_register(void);
@@ -630,20 +800,25 @@ void idxd_cdev_remove(void);
 int idxd_cdev_get_major(struct idxd_device *idxd);
 int idxd_wq_add_cdev(struct idxd_wq *wq);
 void idxd_wq_del_cdev(struct idxd_wq *wq);
+int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
+		 void *buf, int len);
+void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);
 
 /* perfmon */
 #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
 int perfmon_pmu_init(struct idxd_device *idxd);
 void perfmon_pmu_remove(struct idxd_device *idxd);
 void perfmon_counter_overflow(struct idxd_device *idxd);
-void perfmon_init(void);
-void perfmon_exit(void);
 #else
 static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
 static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
 static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
-static inline void perfmon_init(void) {}
-static inline void perfmon_exit(void) {}
 #endif
 
+/* debugfs */
+int idxd_device_init_debugfs(struct idxd_device *idxd);
+void idxd_device_remove_debugfs(struct idxd_device *idxd);
+int idxd_init_debugfs(void);
+void idxd_remove_debugfs(void);
+
 #endif
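Registering a driver like the my_idxd_drv sketch above goes through the idxd_driver_register() wrappers retained in this patch, and the new MODULE_ALIAS_IDXD_DEVICE() macro lets such a module be auto-loaded from a modalias in the IDXD_DEVICES_MODALIAS_FMT form. Continuing the hypothetical sketch (the type argument is illustrative):

module_idxd_driver(my_idxd_drv);
MODULE_ALIAS_IDXD_DEVICE(0);	/* expands to MODULE_ALIAS("idxd:t0*") */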
