Diffstat (limited to 'drivers/gpu/drm/udl/udl_main.c')
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 311
1 file changed, 167 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 853f147036f6..bc58991a6f14 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -8,6 +8,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
+#include <linux/unaligned.h>
+
#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -20,82 +22,108 @@
#define NR_USB_REQUEST_CHANNEL 0x12
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
-#define WRITES_IN_FLIGHT (4)
+#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
-#define GET_URB_TIMEOUT HZ
-#define FREE_URB_TIMEOUT (HZ*2)
+#define UDL_SKU_PIXEL_LIMIT_DEFAULT 2080000
+
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+
+/*
+ * Try to make sense of whatever we parse: on malformed input, return
+ * @end to stop parsing early, but don't fail hard.
+ */
+static const u8 *udl_parse_key_value_pair(struct udl_device *udl, const u8 *pos, const u8 *end)
+{
+ u16 key;
+ u8 len;
+
+ /* read key */
+ if (pos >= end - 2)
+ return end;
+ key = get_unaligned_le16(pos);
+ pos += 2;
+
+ /* read value length */
+ if (pos >= end - 1)
+ return end;
+ len = *pos++;
+
+ /* read value */
+ if (pos >= end - len)
+ return end;
+ switch (key) {
+ case 0x0200: { /* maximum number of pixels */
+ unsigned int sku_pixel_limit;
+
+ if (len < sizeof(__le32))
+ break;
+ sku_pixel_limit = get_unaligned_le32(pos);
+ if (sku_pixel_limit >= 16 * UDL_SKU_PIXEL_LIMIT_DEFAULT)
+ break; /* almost 100 MiB, so probably bogus */
+ udl->sku_pixel_limit = sku_pixel_limit;
+ break;
+ }
+ default:
+ break;
+ }
+ pos += len;
+
+ return pos;
+}
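
The record layout walked above, as implied by the reads the parser performs (an
assumption drawn from the code itself, not from DisplayLink documentation):

/*
 * Key/value record, little-endian:
 *
 *   offset 0: __le16 key        e.g. 0x0200 = SKU pixel limit
 *   offset 2: u8     len        length of value[] in bytes
 *   offset 3: u8     value[len] payload, e.g. a __le32 pixel count
 */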
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
+ struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
- char *desc;
- char *buf;
- char *desc_end;
-
- u8 total_len = 0;
+ bool detected = false;
+ void *buf;
+ int ret;
+ unsigned int len;
+ const u8 *desc;
+ const u8 *desc_end;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
- return false;
+ return -ENOMEM;
+
+ ret = usb_get_descriptor(udev, 0x5f, /* vendor specific */
+ 0, buf, MAX_VENDOR_DESCRIPTOR_SIZE);
+ if (ret < 0)
+ goto out;
+ len = ret;
+
+ if (len < 5)
+ goto out;
+
desc = buf;
+ desc_end = desc + len;
- total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
- 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
- if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%11ph\n",
- total_len, desc);
-
- if ((desc[0] != total_len) || /* descriptor length */
- (desc[1] != 0x5f) || /* vendor descriptor type */
- (desc[2] != 0x01) || /* version (2 bytes) */
- (desc[3] != 0x00) ||
- (desc[4] != total_len - 2)) /* length after type */
- goto unrecognized;
-
- desc_end = desc + total_len;
- desc += 5; /* the fixed header we've already parsed */
-
- while (desc < desc_end) {
- u8 length;
- u16 key;
-
- key = le16_to_cpu(*((u16 *) desc));
- desc += sizeof(u16);
- length = *desc;
- desc++;
-
- switch (key) {
- case 0x0200: { /* max_area */
- u32 max_area;
- max_area = le32_to_cpu(*((u32 *)desc));
- DRM_DEBUG("DL chip limited to %d pixel modes\n",
- max_area);
- udl->sku_pixel_limit = max_area;
- break;
- }
- default:
- break;
- }
- desc += length;
- }
- }
+ if ((desc[0] != len) || /* descriptor length */
+ (desc[1] != 0x5f) || /* vendor descriptor type */
+ (desc[2] != 0x01) || /* version (2 bytes) */
+ (desc[3] != 0x00) ||
+ (desc[4] != len - 2)) /* length after type */
+ goto out;
+ desc += 5;
- goto success;
+ detected = true;
-unrecognized:
- /* allow udlfb to load for now even if firmware unrecognized */
- DRM_ERROR("Unrecognized vendor firmware descriptor\n");
+ while (desc < desc_end)
+ desc = udl_parse_key_value_pair(udl, desc, desc_end);
-success:
+out:
+ if (!detected)
+ drm_warn(dev, "Unrecognized vendor firmware descriptor\n");
kfree(buf);
- return true;
+
+ return 0;
}
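
As a worked example, a hypothetical descriptor that passes the checks above;
the bytes are invented for illustration, not captured from real hardware:

/*
 *   0d 5f 01 00 0b           header: length 0x0d, type 0x5f,
 *                            version 0x0001, 0x0b = length - 2
 *   00 02 04 00 a4 1f 00     record: key 0x0200, len 4,
 *                            value 0x001fa400 = 2073600 pixels (FullHD)
 *   00                       trailing pad: udl_parse_key_value_pair()
 *                            bails out before a value that would extend
 *                            to the very last byte, so one extra byte
 *                            is needed for the record to be accepted
 */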
/*
* Need to ensure a channel is selected before submitting URBs
*/
-static int udl_select_std_channel(struct udl_device *udl)
+int udl_select_std_channel(struct udl_device *udl)
{
static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
@@ -119,14 +147,6 @@ static int udl_select_std_channel(struct udl_device *udl)
return ret < 0 ? ret : 0;
}
-static void udl_release_urb_work(struct work_struct *work)
-{
- struct urb_node *unode = container_of(work, struct urb_node,
- release_urb_work.work);
-
- up(&unode->dev->urbs.limit_sem);
-}
-
void udl_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
@@ -137,6 +157,7 @@ void udl_urb_completion(struct urb *urb)
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
+ urb->status == -EPROTO ||
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
@@ -150,54 +171,37 @@ void udl_urb_completion(struct urb *urb)
udl->urbs.available++;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
-#if 0
- /*
- * When using fb_defio, we deadlock if up() is called
- * while another is waiting. So queue to another process.
- */
- if (fb_defio)
- schedule_delayed_work(&unode->release_urb_work, 0);
- else
-#endif
- up(&udl->urbs.limit_sem);
+ wake_up(&udl->urbs.sleep);
}
-static void udl_free_urb_list(struct drm_device *dev)
+static void udl_free_urb_list(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
- int count = udl->urbs.count;
- struct list_head *node;
struct urb_node *unode;
struct urb *urb;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
- while (count--) {
- down(&udl->urbs.limit_sem);
-
+ while (udl->urbs.count) {
spin_lock_irq(&udl->urbs.lock);
-
- node = udl->urbs.list.next; /* have reserved one with sem */
- list_del_init(node);
-
+ urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
+ udl->urbs.count--;
spin_unlock_irq(&udl->urbs.lock);
-
- unode = list_entry(node, struct urb_node, entry);
- urb = unode->urb;
-
+ if (WARN_ON(!urb))
+ break;
+ unode = urb->context;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, udl->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
- kfree(node);
+ kfree(unode);
}
- udl->urbs.count = 0;
+
+ wake_up_all(&udl->urbs.sleep);
}
-static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
+static int udl_alloc_urb_list(struct udl_device *udl, int count, size_t size)
{
- struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -205,24 +209,20 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
struct usb_device *udev = udl_to_usb_device(udl);
spin_lock_init(&udl->urbs.lock);
-
-retry:
- udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
-
- sema_init(&udl->urbs.limit_sem, 0);
+ init_waitqueue_head(&udl->urbs.sleep);
udl->urbs.count = 0;
udl->urbs.available = 0;
+retry:
+ udl->urbs.size = size;
+
while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = udl;
- INIT_DELAYED_WORK(&unode->release_urb_work,
- udl_release_urb_work);
-
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
@@ -237,7 +237,7 @@ retry:
usb_free_urb(urb);
if (size > PAGE_SIZE) {
size /= 2;
- udl_free_urb_list(dev);
+ udl_free_urb_list(udl);
goto retry;
}
break;
@@ -250,7 +250,6 @@ retry:
list_add_tail(&unode->entry, &udl->urbs.list);
- up(&udl->urbs.limit_sem);
udl->urbs.count++;
udl->urbs.available++;
}
@@ -260,47 +259,54 @@ retry:
return udl->urbs.count;
}
-struct urb *udl_get_urb(struct drm_device *dev)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
- struct udl_device *udl = to_udl(dev);
- int ret = 0;
- struct list_head *entry;
struct urb_node *unode;
- struct urb *urb = NULL;
+
+ assert_spin_locked(&udl->urbs.lock);
/* Wait for an in-flight buffer to complete and get re-queued */
- ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
- if (ret) {
- DRM_INFO("wait for urb interrupted: %x available: %d\n",
- ret, udl->urbs.available);
- goto error;
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ !udl->urbs.count ||
+ !list_empty(&udl->urbs.list),
+ udl->urbs.lock, timeout)) {
+ DRM_INFO("wait for urb interrupted: available: %d\n",
+ udl->urbs.available);
+ return NULL;
}
- spin_lock_irq(&udl->urbs.lock);
+ if (!udl->urbs.count)
+ return NULL;
- BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
- entry = udl->urbs.list.next;
- list_del_init(entry);
+ unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
+ list_del_init(&unode->entry);
udl->urbs.available--;
- spin_unlock_irq(&udl->urbs.lock);
+ return unode->urb;
+}
- unode = list_entry(entry, struct urb_node, entry);
- urb = unode->urb;
+#define GET_URB_TIMEOUT HZ
+struct urb *udl_get_urb(struct udl_device *udl)
+{
+ struct urb *urb;
-error:
+ spin_lock_irq(&udl->urbs.lock);
+ urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
+ spin_unlock_irq(&udl->urbs.lock);
return urb;
}
-int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
+int udl_submit_urb(struct udl_device *udl, struct urb *urb, size_t len)
{
- struct udl_device *udl = to_udl(dev);
int ret;
- BUG_ON(len > udl->urbs.size);
-
+ if (WARN_ON(len > udl->urbs.size)) {
+ ret = -EINVAL;
+ goto error;
+ }
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_ATOMIC);
+ error:
if (ret) {
udl_urb_completion(urb); /* because no one else will */
DRM_ERROR("usb_submit_urb error %x\n", ret);
@@ -308,57 +314,74 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
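
For illustration, a minimal sketch of the get/submit pairing these two helpers
enable; example_send_block() is hypothetical, standing in for the driver's
actual damage-handling code:

static int example_send_block(struct udl_device *udl, const void *data, size_t len)
{
	struct urb *urb;

	/* may block up to GET_URB_TIMEOUT waiting for a free URB */
	urb = udl_get_urb(udl);
	if (!urb)
		return -ENOMEM;
	/* len must not exceed udl->urbs.size; udl_submit_urb() checks this */
	memcpy(urb->transfer_buffer, data, len);
	/* on failure, udl_submit_urb() recycles the URB via udl_urb_completion() */
	return udl_submit_urb(udl, urb, len);
}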
+/* wait until all pending URBs have been processed */
+void udl_sync_pending_urbs(struct udl_device *udl)
+{
+ struct drm_device *dev = &udl->drm;
+
+ spin_lock_irq(&udl->urbs.lock);
+ /* 2 seconds as a sane timeout */
+ if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+ udl->urbs.available == udl->urbs.count,
+ udl->urbs.lock,
+ msecs_to_jiffies(2000)))
+ drm_err(dev, "Timeout for syncing pending URBs\n");
+ spin_unlock_irq(&udl->urbs.lock);
+}
+
int udl_init(struct udl_device *udl)
{
struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
+ struct device *dma_dev;
DRM_DEBUG("\n");
- udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
- if (!udl->dmadev)
+ dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+ if (dma_dev) {
+ drm_dev_set_dma_dev(dev, dma_dev);
+ put_device(dma_dev);
+ } else {
drm_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
- mutex_init(&udl->gem_lock);
+ /*
+ * Not all devices provide a vendor descriptor with device
+ * information. Initialize to a default taken from real-world
+ * devices; it is just enough for FullHD (1920x1080 pixels).
+ */
+ udl->sku_pixel_limit = UDL_SKU_PIXEL_LIMIT_DEFAULT;
- if (!udl_parse_vendor_descriptor(udl)) {
- ret = -ENODEV;
- DRM_ERROR("firmware not recognized. Assume incompatible device\n");
+ ret = udl_parse_vendor_descriptor(udl);
+ if (ret)
goto err;
- }
if (udl_select_std_channel(udl))
DRM_ERROR("Selecting channel failed\n");
- if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ if (!udl_alloc_urb_list(udl, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
+ ret = -ENOMEM;
goto err;
}
DRM_DEBUG("\n");
- ret = udl_modeset_init(dev);
+ ret = udl_modeset_init(udl);
if (ret)
goto err;
- drm_kms_helper_poll_init(dev);
-
return 0;
err:
if (udl->urbs.count)
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
+ udl_free_urb_list(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
-int udl_drop_usb(struct drm_device *dev)
+int udl_drop_usb(struct udl_device *udl)
{
- struct udl_device *udl = to_udl(dev);
-
- udl_free_urb_list(dev);
- put_device(udl->dmadev);
- udl->dmadev = NULL;
+ udl_free_urb_list(udl);
return 0;
}