Diffstat (limited to 'drivers/virtio/virtio_pci_modern.c')
-rw-r--r--   drivers/virtio/virtio_pci_modern.c   755
1 file changed, 745 insertions, 10 deletions
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index d6bb68ba84e5..dd0e65f71d41 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -15,15 +15,353 @@
  */
 
 #include <linux/delay.h>
+#include <linux/virtio_pci_admin.h>
 #define VIRTIO_PCI_NO_LEGACY
 #define VIRTIO_RING_NO_LEGACY
 #include "virtio_pci_common.h"
 
-static u64 vp_get_features(struct virtio_device *vdev)
+#define VIRTIO_AVQ_SGS_MAX 4
+
+static void vp_get_features(struct virtio_device *vdev, u64 *features)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 
-        return vp_modern_get_features(&vp_dev->mdev);
+        vp_modern_get_extended_features(&vp_dev->mdev, features);
+}
+
+static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+        *num = 0;
+        if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+                return 0;
+
+        *num = vp_modern_avq_num(&vp_dev->mdev);
+        if (!(*num))
+                return -EINVAL;
+        *index = vp_modern_avq_index(&vp_dev->mdev);
+        return 0;
+}
+
+void vp_modern_avq_done(struct virtqueue *vq)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+        struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
+        unsigned int status_size = sizeof(struct virtio_admin_cmd_status);
+        struct virtio_admin_cmd *cmd;
+        unsigned long flags;
+        unsigned int len;
+
+        spin_lock_irqsave(&admin_vq->lock, flags);
+        do {
+                virtqueue_disable_cb(vq);
+                while ((cmd = virtqueue_get_buf(vq, &len))) {
+                        /* If the number of bytes written by the device is less
+                         * than the size of struct virtio_admin_cmd_status, the
+                         * remaining status bytes will remain zero-initialized,
+                         * since the buffer was zeroed during allocation.
+                         * In this case, set the size of command_specific_result
+                         * to 0.
+                         */
+                        if (len < status_size)
+                                cmd->result_sg_size = 0;
+                        else
+                                cmd->result_sg_size = len - status_size;
+                        complete(&cmd->completion);
+                }
+        } while (!virtqueue_enable_cb(vq));
+        spin_unlock_irqrestore(&admin_vq->lock, flags);
+}
+
+static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
+                                    u16 opcode,
+                                    struct scatterlist **sgs,
+                                    unsigned int out_num,
+                                    unsigned int in_num,
+                                    struct virtio_admin_cmd *cmd)
+{
+        struct virtqueue *vq;
+        unsigned long flags;
+        int ret;
+
+        vq = admin_vq->info->vq;
+        if (!vq)
+                return -EIO;
+
+        if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
+            opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
+            !((1ULL << opcode) & admin_vq->supported_cmds))
+                return -EOPNOTSUPP;
+
+        init_completion(&cmd->completion);
+
+again:
+        if (virtqueue_is_broken(vq))
+                return -EIO;
+
+        spin_lock_irqsave(&admin_vq->lock, flags);
+        ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
+        if (ret < 0) {
+                if (ret == -ENOSPC) {
+                        spin_unlock_irqrestore(&admin_vq->lock, flags);
+                        cpu_relax();
+                        goto again;
+                }
+                goto unlock_err;
+        }
+        if (!virtqueue_kick(vq))
+                goto unlock_err;
+        spin_unlock_irqrestore(&admin_vq->lock, flags);
+
+        wait_for_completion(&cmd->completion);
+
+        return cmd->ret;
+
+unlock_err:
+        spin_unlock_irqrestore(&admin_vq->lock, flags);
+        return -EIO;
+}
+
+int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
+                             struct virtio_admin_cmd *cmd)
+{
+        struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        struct virtio_admin_cmd_status *va_status;
+        unsigned int out_num = 0, in_num = 0;
+        struct virtio_admin_cmd_hdr *va_hdr;
+        u16 status;
+        int ret;
+
+        if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+                return -EOPNOTSUPP;
+
+        va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
+        if (!va_status)
+                return -ENOMEM;
+
+        va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
+        if (!va_hdr) {
+                ret = -ENOMEM;
+                goto err_alloc;
+        }
+
+        va_hdr->opcode = cmd->opcode;
+        va_hdr->group_type = cmd->group_type;
+        va_hdr->group_member_id = cmd->group_member_id;
+
+        /* Add header */
+        sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
+        sgs[out_num] = &hdr;
+        out_num++;
+
+        if (cmd->data_sg) {
+                sgs[out_num] = cmd->data_sg;
+                out_num++;
+        }
+
+        /* Add return status */
+        sg_init_one(&stat, va_status, sizeof(*va_status));
+        sgs[out_num + in_num] = &stat;
+        in_num++;
+
+        if (cmd->result_sg) {
+                sgs[out_num + in_num] = cmd->result_sg;
+                in_num++;
+        }
+
+        ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
+                                       le16_to_cpu(cmd->opcode),
+                                       sgs, out_num, in_num, cmd);
+        if (ret) {
+                dev_err(&vdev->dev,
+                        "Failed to execute command on admin vq: %d.\n", ret);
+                goto err_cmd_exec;
+        }
+
+        status = le16_to_cpu(va_status->status);
+        if (status != VIRTIO_ADMIN_STATUS_OK) {
+                dev_err(&vdev->dev,
+                        "admin command error: status(%#x) qualifier(%#x)\n",
+                        status, le16_to_cpu(va_status->status_qualifier));
+                ret = -status;
+        }
+
+err_cmd_exec:
+        kfree(va_hdr);
+err_alloc:
+        kfree(va_status);
+        return ret;
+}
+
+static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist result_sg;
+        struct scatterlist data_sg;
+        __le64 *data;
+        int ret;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return;
+
+        sg_init_one(&result_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.result_sg = &result_sg;
+
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (ret)
+                goto end;
+
+        *data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
+        sg_init_one(&data_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
+        cmd.data_sg = &data_sg;
+        cmd.result_sg = NULL;
+
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (ret)
+                goto end;
+
+        vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
+end:
+        kfree(data);
+}
+
+static void
+virtio_pci_admin_cmd_dev_parts_objects_enable(struct virtio_device *virtio_dev)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+        struct virtio_admin_cmd_cap_get_data *get_data;
+        struct virtio_admin_cmd_cap_set_data *set_data;
+        struct virtio_dev_parts_cap *result;
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist result_sg;
+        struct scatterlist data_sg;
+        u8 resource_objects_limit;
+        u16 set_data_size;
+        int ret;
+
+        get_data = kzalloc(sizeof(*get_data), GFP_KERNEL);
+        if (!get_data)
+                return;
+
+        result = kzalloc(sizeof(*result), GFP_KERNEL);
+        if (!result)
+                goto end;
+
+        get_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+        sg_init_one(&data_sg, get_data, sizeof(*get_data));
+        sg_init_one(&result_sg, result, sizeof(*result));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+        cmd.data_sg = &data_sg;
+        cmd.result_sg = &result_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (ret)
+                goto err_get;
+
+        set_data_size = sizeof(*set_data) + sizeof(*result);
+        set_data = kzalloc(set_data_size, GFP_KERNEL);
+        if (!set_data)
+                goto err_get;
+
+        set_data->id = cpu_to_le16(VIRTIO_DEV_PARTS_CAP);
+
+        /* Set the limit to the minimum value between the GET and SET values
+         * supported by the device. Since the obj_id for VIRTIO_DEV_PARTS_CAP
+         * is a globally unique value per PF, there is no possibility of
+         * overlap between GET and SET operations.
+         */
+        resource_objects_limit = min(result->get_parts_resource_objects_limit,
+                                     result->set_parts_resource_objects_limit);
+        result->get_parts_resource_objects_limit = resource_objects_limit;
+        result->set_parts_resource_objects_limit = resource_objects_limit;
+        memcpy(set_data->cap_specific_data, result, sizeof(*result));
+        sg_init_one(&data_sg, set_data, set_data_size);
+        cmd.data_sg = &data_sg;
+        cmd.result_sg = NULL;
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET);
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (ret)
+                goto err_set;
+
+        /* Initialize the IDA used to manage the dev parts objects */
+        ida_init(&vp_dev->admin_vq.dev_parts_ida);
+        vp_dev->admin_vq.max_dev_parts_objects = resource_objects_limit;
+
+err_set:
+        kfree(set_data);
+err_get:
+        kfree(result);
+end:
+        kfree(get_data);
+}
+
+static void virtio_pci_admin_cmd_cap_init(struct virtio_device *virtio_dev)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
+        struct virtio_admin_cmd_query_cap_id_result *data;
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist result_sg;
+        int ret;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return;
+
+        sg_init_one(&result_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SELF);
+        cmd.result_sg = &result_sg;
+
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (ret)
+                goto end;
+
+        /* Max number of caps fits into a single u64 */
+        BUILD_BUG_ON(sizeof(data->supported_caps) > sizeof(u64));
+
+        vp_dev->admin_vq.supported_caps = le64_to_cpu(data->supported_caps[0]);
+
+        if (!(vp_dev->admin_vq.supported_caps & (1 << VIRTIO_DEV_PARTS_CAP)))
+                goto end;
+
+        virtio_pci_admin_cmd_dev_parts_objects_enable(virtio_dev);
+end:
+        kfree(data);
+}
+
+static void vp_modern_avq_activate(struct virtio_device *vdev)
+{
+        if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+                return;
+
+        virtio_pci_admin_cmd_list_init(vdev);
+        virtio_pci_admin_cmd_cap_init(vdev);
+}
+
+static void vp_modern_avq_cleanup(struct virtio_device *vdev)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+        struct virtio_admin_cmd *cmd;
+        struct virtqueue *vq;
+
+        if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
+                return;
+
+        vq = vp_dev->admin_vq.info->vq;
+        if (!vq)
+                return;
+
+        while ((cmd = virtqueue_detach_unused_buf(vq))) {
+                cmd->ret = -EIO;
+                complete(&cmd->completion);
+        }
 }
 
 static void vp_transport_features(struct virtio_device *vdev, u64 features)
@@ -37,6 +375,45 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
 
         if (features & BIT_ULL(VIRTIO_F_RING_RESET))
                 __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
+
+        if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
+                __virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
+}
+
+static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
+                                              u32 offset, const char *fname)
+{
+        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+        if (!__virtio_test_bit(vdev, fbit))
+                return 0;
+
+        if (likely(vp_dev->mdev.common_len >= offset))
+                return 0;
+
+        dev_err(&vdev->dev,
+                "virtio: common cfg size(%zu) does not match the feature %s\n",
+                vp_dev->mdev.common_len, fname);
+
+        return -EINVAL;
+}
+
+#define vp_check_common_size_one_feature(vdev, fbit, field) \
+        __vp_check_common_size_one_feature(vdev, fbit, \
+                offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)
+
+static int vp_check_common_size(struct virtio_device *vdev)
+{
+        if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA,
+                                             queue_notify_data))
+                return -EINVAL;
+
+        if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
+                return -EINVAL;
+
+        if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
+                return -EINVAL;
+
+        return 0;
 }
 
 /* virtio config->finalize_features() implementation */
@@ -57,7 +434,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
                 return -EINVAL;
         }
 
-        vp_modern_set_features(&vp_dev->mdev, vdev->features);
+        if (vp_check_common_size(vdev))
+                return -EINVAL;
+
+        vp_modern_set_extended_features(&vp_dev->mdev, vdev->features_array);
 
         return 0;
 }
@@ -159,6 +539,8 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
         /* We should never be setting status to 0. */
         BUG_ON(status == 0);
         vp_modern_set_status(&vp_dev->mdev, status);
+        if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+                vp_modern_avq_activate(vdev);
 }
 
 static void vp_reset(struct virtio_device *vdev)
@@ -175,6 +557,9 @@ static void vp_reset(struct virtio_device *vdev)
          */
         while (vp_modern_get_status(mdev))
                 msleep(1);
+
+        vp_modern_avq_cleanup(vdev);
+
         /* Flush pending VQ/configuration callbacks. */
         vp_synchronize_vectors(vdev);
 }
@@ -309,6 +694,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
         struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
         bool (*notify)(struct virtqueue *vq);
         struct virtqueue *vq;
+        bool is_avq;
         u16 num;
         int err;
 
@@ -317,11 +703,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
         else
                 notify = vp_notify;
 
-        if (index >= vp_modern_get_num_queues(mdev))
+        is_avq = vp_is_avq(&vp_dev->vdev, index);
+        if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
                 return ERR_PTR(-EINVAL);
 
-        /* Check if queue is either not available or already active. */
         num = vp_modern_get_queue_size(mdev, index);
+        /* Check if queue is either not available or already active. */
         if (!num || vp_modern_get_queue_enable(mdev, index))
                 return ERR_PTR(-ENOENT);
 
@@ -356,13 +743,12 @@ err:
 
 static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
                               struct virtqueue *vqs[],
-                              vq_callback_t *callbacks[],
-                              const char * const names[], const bool *ctx,
+                              struct virtqueue_info vqs_info[],
                               struct irq_affinity *desc)
 {
         struct virtio_pci_device *vp_dev = to_vp_device(vdev);
         struct virtqueue *vq;
-        int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
+        int rc = vp_find_vqs(vdev, nvqs, vqs, vqs_info, desc);
 
         if (rc)
                 return rc;
@@ -491,6 +877,353 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
         return true;
 }
 
+/*
+ * virtio_pci_admin_has_dev_parts - Checks whether the device parts
+ * functionality is supported
+ * @pdev: VF pci_dev
+ *
+ * Returns true if the device parts functionality is supported.
+ */
+bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_pci_device *vp_dev;
+
+        if (!virtio_dev)
+                return false;
+
+        if (!virtio_has_feature(virtio_dev, VIRTIO_F_ADMIN_VQ))
+                return false;
+
+        vp_dev = to_vp_device(virtio_dev);
+
+        if (!((vp_dev->admin_vq.supported_cmds & VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP) ==
+              VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP))
+                return false;
+
+        return vp_dev->admin_vq.max_dev_parts_objects;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_has_dev_parts);
+
+/*
+ * virtio_pci_admin_mode_set - Sets the mode of a member device
+ * @pdev: VF pci_dev
+ * @flags: device mode's flags
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 flags)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_admin_cmd_dev_mode_set_data *data;
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist data_sg;
+        int vf_id;
+        int ret;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        data->flags = flags;
+        sg_init_one(&data_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_MODE_SET);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = &data_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+        kfree(data);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_mode_set);
+
+/*
+ * virtio_pci_admin_obj_create - Creates an object for a given type and operation,
+ * within the limit of objects that can be created for that request.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @operation_type: Operation type
+ * @obj_id: Output unique object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type,
+                                u32 *obj_id)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        u16 data_size = sizeof(struct virtio_admin_cmd_resource_obj_create_data);
+        struct virtio_admin_cmd_resource_obj_create_data *obj_create_data;
+        struct virtio_resource_obj_dev_parts obj_dev_parts = {};
+        struct virtio_pci_admin_vq *avq;
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist data_sg;
+        void *data;
+        int id = -1;
+        int vf_id;
+        int ret;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+                return -EOPNOTSUPP;
+
+        if (operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET &&
+            operation_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET)
+                return -EINVAL;
+
+        avq = &to_vp_device(virtio_dev)->admin_vq;
+        if (!avq->max_dev_parts_objects)
+                return -EOPNOTSUPP;
+
+        id = ida_alloc_range(&avq->dev_parts_ida, 0,
+                             avq->max_dev_parts_objects - 1, GFP_KERNEL);
+        if (id < 0)
+                return id;
+
+        *obj_id = id;
+        data_size += sizeof(obj_dev_parts);
+        data = kzalloc(data_size, GFP_KERNEL);
+        if (!data) {
+                ret = -ENOMEM;
+                goto end;
+        }
+
+        obj_create_data = data;
+        obj_create_data->hdr.type = cpu_to_le16(obj_type);
+        obj_create_data->hdr.id = cpu_to_le32(*obj_id);
+        obj_dev_parts.type = operation_type;
+        memcpy(obj_create_data->resource_obj_specific_data, &obj_dev_parts,
+               sizeof(obj_dev_parts));
+        sg_init_one(&data_sg, data, data_size);
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = &data_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+
+        kfree(data);
+end:
+        if (ret)
+                ida_free(&avq->dev_parts_ida, id);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_create);
+
+/*
+ * virtio_pci_admin_obj_destroy - Destroys an object of a given type and id
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_admin_cmd_resource_obj_cmd_hdr *data;
+        struct virtio_pci_device *vp_dev;
+        struct virtio_admin_cmd cmd = {};
+        struct scatterlist data_sg;
+        int vf_id;
+        int ret;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        if (obj_type != VIRTIO_RESOURCE_OBJ_DEV_PARTS)
+                return -EINVAL;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        data->type = cpu_to_le16(obj_type);
+        data->id = cpu_to_le32(id);
+        sg_init_one(&data_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = &data_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (!ret) {
+                vp_dev = to_vp_device(virtio_dev);
+                ida_free(&vp_dev->admin_vq.dev_parts_ida, id);
+        }
+
+        kfree(data);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_obj_destroy);
+
+/*
+ * virtio_pci_admin_dev_parts_metadata_get - Gets the metadata of the device parts
+ * identified by the below attributes.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @metadata_type: Metadata type
+ * @out: Upon success holds the output for 'metadata type size'
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type,
+                                            u32 id, u8 metadata_type, u32 *out)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_admin_cmd_dev_parts_metadata_result *result;
+        struct virtio_admin_cmd_dev_parts_metadata_data *data;
+        struct scatterlist data_sg, result_sg;
+        struct virtio_admin_cmd cmd = {};
+        int vf_id;
+        int ret;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        if (metadata_type != VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE)
+                return -EOPNOTSUPP;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        result = kzalloc(sizeof(*result), GFP_KERNEL);
+        if (!result) {
+                ret = -ENOMEM;
+                goto end;
+        }
+
+        data->hdr.type = cpu_to_le16(obj_type);
+        data->hdr.id = cpu_to_le32(id);
+        data->type = metadata_type;
+        sg_init_one(&data_sg, data, sizeof(*data));
+        sg_init_one(&result_sg, result, sizeof(*result));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = &data_sg;
+        cmd.result_sg = &result_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (!ret)
+                *out = le32_to_cpu(result->parts_size.size);
+
+        kfree(result);
+end:
+        kfree(data);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_metadata_get);
+
+/*
+ * virtio_pci_admin_dev_parts_get - Gets the device parts identified by the below attributes.
+ * @pdev: VF pci_dev
+ * @obj_type: Object type
+ * @id: Object id
+ * @get_type: Get type
+ * @res_sg: Upon success holds the output result data
+ * @res_size: Upon success holds the output result size
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id,
+                                   u8 get_type, struct scatterlist *res_sg,
+                                   u32 *res_size)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_admin_cmd_dev_parts_get_data *data;
+        struct scatterlist data_sg;
+        struct virtio_admin_cmd cmd = {};
+        int vf_id;
+        int ret;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        if (get_type != VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL)
+                return -EOPNOTSUPP;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+                return -ENOMEM;
+
+        data->hdr.type = cpu_to_le16(obj_type);
+        data->hdr.id = cpu_to_le32(id);
+        data->type = get_type;
+        sg_init_one(&data_sg, data, sizeof(*data));
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_GET);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = &data_sg;
+        cmd.result_sg = res_sg;
+        ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+        if (!ret)
+                *res_size = cmd.result_sg_size;
+
+        kfree(data);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_get);
+
+/*
+ * virtio_pci_admin_dev_parts_set - Sets the device parts identified by the below attributes.
+ * @pdev: VF pci_dev
+ * @data_sg: The device parts data, its layout follows struct virtio_admin_cmd_dev_parts_set_data
+ *
+ * Note: caller must serialize access for the given device.
+ * Returns 0 on success, or negative on failure.
+ */
+int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg)
+{
+        struct virtio_device *virtio_dev = virtio_pci_vf_get_pf_dev(pdev);
+        struct virtio_admin_cmd cmd = {};
+        int vf_id;
+
+        if (!virtio_dev)
+                return -ENODEV;
+
+        vf_id = pci_iov_vf_id(pdev);
+        if (vf_id < 0)
+                return vf_id;
+
+        cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_DEV_PARTS_SET);
+        cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
+        cmd.group_member_id = cpu_to_le64(vf_id + 1);
+        cmd.data_sg = data_sg;
+        return vp_modern_admin_cmd_exec(virtio_dev, &cmd);
+}
+EXPORT_SYMBOL_GPL(virtio_pci_admin_dev_parts_set);
+
 static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
         .get = NULL,
         .set = NULL,
@@ -501,7 +1234,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
         .find_vqs = vp_modern_find_vqs,
         .del_vqs = vp_del_vqs,
         .synchronize_cbs = vp_synchronize_vectors,
-        .get_features = vp_get_features,
+        .get_extended_features = vp_get_features,
         .finalize_features = vp_finalize_features,
         .bus_name = vp_bus_name,
         .set_vq_affinity = vp_set_vq_affinity,
@@ -521,7 +1254,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
         .find_vqs = vp_modern_find_vqs,
         .del_vqs = vp_del_vqs,
         .synchronize_cbs = vp_synchronize_vectors,
-        .get_features = vp_get_features,
+        .get_extended_features = vp_get_features,
         .finalize_features = vp_finalize_features,
         .bus_name = vp_bus_name,
         .set_vq_affinity = vp_set_vq_affinity,
@@ -552,9 +1285,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
         vp_dev->config_vector = vp_config_vector;
         vp_dev->setup_vq = setup_vq;
         vp_dev->del_vq = del_vq;
+        vp_dev->avq_index = vp_avq_index;
         vp_dev->isr = mdev->isr;
         vp_dev->vdev.id = mdev->id;
 
+        spin_lock_init(&vp_dev->admin_vq.lock);
         return 0;
 }
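The device-parts commands exported above are meant to be driven from a VF migration flow (the in-tree consumer is the virtio-vfio-pci variant driver). Below is a minimal, hypothetical sketch of a save path built only on the exported API; the function name example_save_dev_parts(), the single kzalloc'd buffer, and the one-entry scatterlist are illustrative assumptions, and a real consumer would stream large transfers through a multi-entry sg table rather than one contiguous allocation:

/* Hypothetical consumer sketch -- not part of this patch. */
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_admin.h>

static int example_save_dev_parts(struct pci_dev *vf_pdev, void **buf,
                                  u32 *buf_size)
{
        struct scatterlist sg;
        u32 obj_id, part_size;
        void *data;
        int ret;

        if (!virtio_pci_admin_has_dev_parts(vf_pdev))
                return -EOPNOTSUPP;

        /* Create a device parts object in "get" mode for this VF. */
        ret = virtio_pci_admin_obj_create(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
                                          VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET,
                                          &obj_id);
        if (ret)
                return ret;

        /* Ask the device how many bytes the serialized parts occupy. */
        ret = virtio_pci_admin_dev_parts_metadata_get(vf_pdev,
                        VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
                        VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE,
                        &part_size);
        if (ret)
                goto out_destroy;

        data = kzalloc(part_size, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto out_destroy;
        }

        /* Read all parts at once; *buf_size gets the bytes actually written. */
        sg_init_one(&sg, data, part_size);
        ret = virtio_pci_admin_dev_parts_get(vf_pdev,
                        VIRTIO_RESOURCE_OBJ_DEV_PARTS, obj_id,
                        VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL,
                        &sg, buf_size);
        if (ret)
                kfree(data);
        else
                *buf = data;

out_destroy:
        virtio_pci_admin_obj_destroy(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
                                     obj_id);
        return ret;
}

All of the calls above must be serialized by the caller, as the kernel-doc comments of the exported functions require.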
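The restore direction is the mirror image: quiesce the member device with virtio_pci_admin_mode_set(), create a SET-type parts object, and push the saved blob through virtio_pci_admin_dev_parts_set(). Another hedged sketch under the same assumptions; it additionally assumes VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED from <linux/virtio_pci.h> is the stop-mode flag mask and that buf already carries the struct virtio_admin_cmd_dev_parts_set_data layout the kernel-doc above calls out:

/* Hypothetical consumer sketch -- not part of this patch. */
static int example_restore_dev_parts(struct pci_dev *vf_pdev, void *buf,
                                     u32 buf_size)
{
        struct scatterlist sg;
        u32 obj_id;
        int ret;

        /* Stop the VF so its state can be replaced consistently. */
        ret = virtio_pci_admin_mode_set(vf_pdev,
                                        VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED);
        if (ret)
                return ret;

        ret = virtio_pci_admin_obj_create(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
                                          VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET,
                                          &obj_id);
        if (ret)
                goto out_resume;

        /* buf follows struct virtio_admin_cmd_dev_parts_set_data. */
        sg_init_one(&sg, buf, buf_size);
        ret = virtio_pci_admin_dev_parts_set(vf_pdev, &sg);

        virtio_pci_admin_obj_destroy(vf_pdev, VIRTIO_RESOURCE_OBJ_DEV_PARTS,
                                     obj_id);
out_resume:
        /* Clear the stop flag; mode flags 0 means the device runs again. */
        virtio_pci_admin_mode_set(vf_pdev, 0);
        return ret;
}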
