Diffstat (limited to 'drivers')
394 files changed, 14961 insertions, 11925 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 7802d8846a8d..44ad4b6bd234 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -212,6 +212,7 @@ config ACPI_VIDEO tristate "Video" depends on BACKLIGHT_CLASS_DEVICE depends on INPUT + depends on ACPI_WMI || !X86 select THERMAL help This driver implements the ACPI Extensions For Display Adapters diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 5cbe2196176d..a7c3d11e0dac 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -73,6 +73,16 @@ module_param(device_id_scheme, bool, 0444); static int only_lcd = -1; module_param(only_lcd, int, 0444); +/* + * Display probing is known to take up to 5 seconds, so delay the fallback + * backlight registration by 5 seconds + 3 seconds for some extra margin. + */ +static int register_backlight_delay = 8; +module_param(register_backlight_delay, int, 0444); +MODULE_PARM_DESC(register_backlight_delay, + "Delay in seconds before doing fallback (non GPU driver triggered) " + "backlight registration, set to 0 to disable."); + static bool may_report_brightness_keys; static int register_count; static DEFINE_MUTEX(register_count_mutex); @@ -81,7 +91,9 @@ static LIST_HEAD(video_bus_head); static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device); static void acpi_video_bus_notify(struct acpi_device *device, u32 event); -void acpi_video_detect_exit(void); +static void acpi_video_bus_register_backlight_work(struct work_struct *ignored); +static DECLARE_DELAYED_WORK(video_bus_register_backlight_work, + acpi_video_bus_register_backlight_work); /* * Indices in the _BCL method response: the first two items are special, @@ -1859,8 +1871,6 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video) if (video->backlight_registered) return 0; - acpi_video_run_bcl_for_osi(video); - if (acpi_video_get_backlight_type() != acpi_backlight_video) return 0; @@ -2086,7 +2096,11 @@ static int acpi_video_bus_add(struct acpi_device *device) list_add_tail(&video->entry, &video_bus_head); mutex_unlock(&video_list_lock); - acpi_video_bus_register_backlight(video); + /* + * The userspace visible backlight_device gets registered separately + * from acpi_video_register_backlight(). + */ + acpi_video_run_bcl_for_osi(video); acpi_video_bus_add_notify_handler(video); return 0; @@ -2111,20 +2125,25 @@ static int acpi_video_bus_remove(struct acpi_device *device) video = acpi_driver_data(device); - acpi_video_bus_remove_notify_handler(video); - acpi_video_bus_unregister_backlight(video); - acpi_video_bus_put_devices(video); - mutex_lock(&video_list_lock); list_del(&video->entry); mutex_unlock(&video_list_lock); + acpi_video_bus_remove_notify_handler(video); + acpi_video_bus_unregister_backlight(video); + acpi_video_bus_put_devices(video); + kfree(video->attached_array); kfree(video); return 0; } +static void acpi_video_bus_register_backlight_work(struct work_struct *ignored) +{ + acpi_video_register_backlight(); +} + static int __init is_i740(struct pci_dev *dev) { if (dev->device == 0x00D1) @@ -2235,6 +2254,18 @@ int acpi_video_register(void) */ register_count = 1; + /* + * acpi_video_bus_add() skips registering the userspace visible + * backlight_device. The intent is for this to be registered by the + * drm/kms driver calling acpi_video_register_backlight() *after* it is + * done setting up its own native backlight device.
The delayed work + * ensures that acpi_video_register_backlight() always gets called + * eventually, in case there is no drm/kms driver or it is disabled. + */ + if (register_backlight_delay) + schedule_delayed_work(&video_bus_register_backlight_work, + register_backlight_delay * HZ); + leave: mutex_unlock(&register_count_mutex); return ret; @@ -2245,6 +2276,7 @@ void acpi_video_unregister(void) { mutex_lock(&register_count_mutex); if (register_count) { + cancel_delayed_work_sync(&video_bus_register_backlight_work); acpi_bus_unregister_driver(&acpi_video_bus); register_count = 0; may_report_brightness_keys = false; @@ -2253,19 +2285,16 @@ void acpi_video_unregister(void) } EXPORT_SYMBOL(acpi_video_unregister); -void acpi_video_unregister_backlight(void) +void acpi_video_register_backlight(void) { struct acpi_video_bus *video; - mutex_lock(&register_count_mutex); - if (register_count) { - mutex_lock(&video_list_lock); - list_for_each_entry(video, &video_bus_head, entry) - acpi_video_bus_unregister_backlight(video); - mutex_unlock(&video_list_lock); - } - mutex_unlock(&register_count_mutex); + mutex_lock(&video_list_lock); + list_for_each_entry(video, &video_bus_head, entry) + acpi_video_bus_register_backlight(video); + mutex_unlock(&video_list_lock); } +EXPORT_SYMBOL(acpi_video_register_backlight); bool acpi_video_handles_brightness_key_presses(void) { @@ -2302,7 +2331,6 @@ static int __init acpi_video_init(void) static void __exit acpi_video_exit(void) { - acpi_video_detect_exit(); acpi_video_unregister(); } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 5d7f38016a24..db2474fe58ac 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -17,8 +17,9 @@ * Otherwise vendor specific drivers like thinkpad_acpi, asus-laptop, * sony_acpi,... can take care about backlight brightness. * - * Backlight drivers can use acpi_video_get_backlight_type() to determine - * which driver should handle the backlight. + * Backlight drivers can use acpi_video_get_backlight_type() to determine which + * driver should handle the backlight. RAW/GPU-driver backlight drivers must + * use the acpi_video_backlight_use_native() helper for this.
* * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) * this file will not be compiled and acpi_video_get_backlight_type() will @@ -27,20 +28,16 @@ #include <linux/export.h> #include <linux/acpi.h> +#include <linux/apple-gmux.h> #include <linux/backlight.h> #include <linux/dmi.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h> #include <linux/types.h> #include <linux/workqueue.h> #include <acpi/video.h> -void acpi_video_unregister_backlight(void); - -static bool backlight_notifier_registered; -static struct notifier_block backlight_nb; -static struct work_struct backlight_notify_work; - static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; @@ -78,6 +75,36 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; } +/* This depends on ACPI_WMI which is X86 only */ +#ifdef CONFIG_X86 +static bool nvidia_wmi_ec_supported(void) +{ + struct wmi_brightness_args args = { + .mode = WMI_BRIGHTNESS_MODE_GET, + .val = 0, + .ret = 0, + }; + struct acpi_buffer buf = { (acpi_size)sizeof(args), &args }; + acpi_status status; + + status = wmi_evaluate_method(WMI_BRIGHTNESS_GUID, 0, + WMI_BRIGHTNESS_METHOD_SOURCE, &buf, &buf); + if (ACPI_FAILURE(status)) + return false; + + /* + * If brightness is handled by the EC then nvidia-wmi-ec-backlight + * should be used, else the GPU driver(s) should be used. + */ + return args.ret == WMI_BRIGHTNESS_SOURCE_EC; +} +#else +static bool nvidia_wmi_ec_supported(void) +{ + return false; +} +#endif + /* Force to use vendor driver when the ACPI device is known to be * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) @@ -105,63 +132,143 @@ static int video_detect_force_none(const struct dmi_system_id *d) } static const struct dmi_system_id video_detect_dmi_table[] = { - /* On Samsung X360, the BIOS will set a flag (VDRV) if generic - * ACPI backlight device is used. This flag will definitively break - * the backlight interface (even the vendor interface) until next - * reboot. It's why we should prevent video.ko from being used here - * and we can't rely on a later call to acpi_video_unregister(). 
- */ { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1128309 */ .callback = video_detect_force_vendor, - /* X360 */ + /* Acer KAV80 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X360"), - DMI_MATCH(DMI_BOARD_NAME, "X360"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), }, }, { - .callback = video_detect_force_vendor, - /* Asus UL30VT */ - .matches = { + .callback = video_detect_force_vendor, + /* Asus UL30VT */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"), }, }, { - .callback = video_detect_force_vendor, - /* Asus UL30A */ - .matches = { + .callback = video_detect_force_vendor, + /* Asus UL30A */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), }, }, { - .callback = video_detect_force_vendor, - /* GIGABYTE GB-BXBT-2807 */ - .matches = { + .callback = video_detect_force_vendor, + /* Asus X55U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X55U"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus X101CH */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus X401U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X401U"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus X501U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X501U"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Asus 1015CX */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"), + }, + }, + { + .callback = video_detect_force_vendor, + /* GIGABYTE GB-BXBT-2807 */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"), }, }, { - .callback = video_detect_force_vendor, - /* Sony VPCEH3U1E */ - .matches = { + .callback = video_detect_force_vendor, + /* Samsung N150/N210/N220 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Samsung NF110/NF210/NF310 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), + DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Samsung NC210 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"), + DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"), + }, + }, + { + .callback = video_detect_force_vendor, + /* Sony VPCEH3U1E */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"), }, }, { - .callback = video_detect_force_vendor, - /* Xiaomi Mi Pad 2 */ - .matches = { + .callback = video_detect_force_vendor, + /* Xiaomi Mi Pad 2 */ + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"), DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"), }, }, /* + * Toshiba models with Transflective display, these need to use + * the toshiba_acpi vendor driver for proper Transflective handling. 
+ */ + { + .callback = video_detect_force_vendor, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R500"), + }, + }, + { + .callback = video_detect_force_vendor, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R600"), + }, + }, + + /* * These models have a working acpi_video backlight control, and using * native backlight causes a regression where backlight does not work * when userspace is not handling brightness key events. Disable @@ -390,130 +497,119 @@ static const struct dmi_system_id video_detect_dmi_table[] = { }, }, { - /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + /* https://bugzilla.redhat.com/show_bug.cgi?id=1012674 */ .callback = video_detect_force_native, - /* Acer TravelMate 5735Z */ + /* Acer Aspire 5741 */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), - DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), }, }, { - .callback = video_detect_force_native, - /* ASUSTeK COMPUTER INC. GA401 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GA401"), - }, - }, - { - .callback = video_detect_force_native, - /* ASUSTeK COMPUTER INC. GA502 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GA502"), + /* https://bugzilla.kernel.org/show_bug.cgi?id=42993 */ + .callback = video_detect_force_native, + /* Acer Aspire 5750 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), }, }, { - .callback = video_detect_force_native, - /* ASUSTeK COMPUTER INC. GA503 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), + /* https://bugzilla.kernel.org/show_bug.cgi?id=42833 */ + .callback = video_detect_force_native, + /* Acer Extensa 5235 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"), }, }, - /* - * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a - * working native and video interface. However the default detection - * mechanism first registers the video interface before unregistering - * it again and switching to the native interface during boot. This - * results in a dangling SBIOS request for backlight change for some - * reason, causing the backlight to switch to ~2% once per boot on the - * first power cord connect or disconnect event. Setting the native - * interface explicitly circumvents this buggy behaviour, by avoiding - * the unregistering process. 
- */ { - .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + .callback = video_detect_force_native, + /* Acer TravelMate 4750 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), }, }, { - .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + /* https://bugzilla.kernel.org/show_bug.cgi?id=207835 */ + .callback = video_detect_force_native, + /* Acer TravelMate 5735Z */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5735Z"), + DMI_MATCH(DMI_BOARD_NAME, "BA51_MV"), }, }, { - .callback = video_detect_force_native, - .ident = "Clevo NL5xRU", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + /* https://bugzilla.kernel.org/show_bug.cgi?id=36322 */ + .callback = video_detect_force_native, + /* Acer TravelMate 5760 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"), }, }, { - .callback = video_detect_force_native, - .ident = "Clevo NL5xNU", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. GA401 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA401"), }, }, - /* - * The TongFang PF5PU1G, PF4NU1F, PF5NU1G, and PF5LUXG/TUXEDO BA15 Gen10, - * Pulse 14/15 Gen1, and Pulse 15 Gen2 have the same problem as the Clevo - * NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2. See the description - * above. - */ { - .callback = video_detect_force_native, - .ident = "TongFang PF5PU1G", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "PF5PU1G"), + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. GA502 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA502"), }, }, { - .callback = video_detect_force_native, - .ident = "TongFang PF4NU1F", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "PF4NU1F"), + .callback = video_detect_force_native, + /* ASUSTeK COMPUTER INC. 
GA503 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), }, }, { - .callback = video_detect_force_native, - .ident = "TongFang PF4NU1F", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "PULSE1401"), + .callback = video_detect_force_native, + /* Asus UX303UB */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"), }, }, { - .callback = video_detect_force_native, - .ident = "TongFang PF5NU1G", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "PF5NU1G"), + .callback = video_detect_force_native, + /* Samsung N150P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), + DMI_MATCH(DMI_BOARD_NAME, "N150P"), }, }, { - .callback = video_detect_force_native, - .ident = "TongFang PF5NU1G", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), - DMI_MATCH(DMI_BOARD_NAME, "PULSE1501"), + .callback = video_detect_force_native, + /* Samsung N145P/N250P/N260P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), + DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), }, }, { - .callback = video_detect_force_native, - .ident = "TongFang PF5LUXG", - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "PF5LUXG"), + .callback = video_detect_force_native, + /* Samsung N250P */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), + DMI_MATCH(DMI_BOARD_NAME, "N250P"), }, }, + /* * Desktops which falsely report a backlight and which our heuristics * for this do not catch. @@ -537,43 +633,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { { }, }; -/* This uses a workqueue to avoid various locking ordering issues */ -static void acpi_video_backlight_notify_work(struct work_struct *work) -{ - if (acpi_video_get_backlight_type() != acpi_backlight_video) - acpi_video_unregister_backlight(); -} - -static int acpi_video_backlight_notify(struct notifier_block *nb, - unsigned long val, void *bd) -{ - struct backlight_device *backlight = bd; - - /* A raw bl registering may change video -> native */ - if (backlight->props.type == BACKLIGHT_RAW && - val == BACKLIGHT_REGISTERED) - schedule_work(&backlight_notify_work); - - return NOTIFY_OK; -} - /* * Determine which type of backlight interface to use on this system, * First check cmdline, then dmi quirks, then do autodetect. - * - * The autodetect order is: - * 1) Is the acpi-video backlight interface supported -> - * no, use a vendor interface - * 2) Is this a win8 "ready" BIOS and do we have a native interface -> - * yes, use a native interface - * 3) Else use the acpi-video interface - * - * Arguably the native on win8 check should be done first, but that would - * be a behavior change, which may causes issues. 
*/ -enum acpi_backlight_type acpi_video_get_backlight_type(void) +static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) { static DEFINE_MUTEX(init_mutex); + static bool nvidia_wmi_ec_present; + static bool native_available; static bool init_done; static long video_caps; @@ -585,48 +653,60 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void) acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, find_video, NULL, &video_caps, NULL); - INIT_WORK(&backlight_notify_work, - acpi_video_backlight_notify_work); - backlight_nb.notifier_call = acpi_video_backlight_notify; - backlight_nb.priority = 0; - if (backlight_register_notifier(&backlight_nb) == 0) - backlight_notifier_registered = true; + nvidia_wmi_ec_present = nvidia_wmi_ec_supported(); init_done = true; } + if (native) + native_available = true; mutex_unlock(&init_mutex); + /* + * The below heuristics / detection steps are in order of descending + * precedence. The commandline takes precedence over anything else. + */ if (acpi_backlight_cmdline != acpi_backlight_undef) return acpi_backlight_cmdline; + /* DMI quirks override any autodetection. */ if (acpi_backlight_dmi != acpi_backlight_undef) return acpi_backlight_dmi; - if (!(video_caps & ACPI_VIDEO_BACKLIGHT)) - return acpi_backlight_vendor; + /* Special cases such as nvidia_wmi_ec and apple gmux. */ + if (nvidia_wmi_ec_present) + return acpi_backlight_nvidia_wmi_ec; - if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW)) - return acpi_backlight_native; + if (apple_gmux_present()) + return acpi_backlight_apple_gmux; + + /* On systems with ACPI video use either native or ACPI video. */ + if (video_caps & ACPI_VIDEO_BACKLIGHT) { + /* + * Windows 8 and newer no longer use the ACPI video interface, + * so it often does not work. If the ACPI tables are written + * for win8 and native brightness ctl is available, use that. + * + * The native check deliberately is inside the if acpi-video + * block, since on older devices without acpi-video support + * native is usually not the best choice. + */ + if (acpi_osi_is_win8() && native_available) + return acpi_backlight_native; + else + return acpi_backlight_video; + } - return acpi_backlight_video; + /* No ACPI video (old hw), use vendor specific fw methods. */ + return acpi_backlight_vendor; } -EXPORT_SYMBOL(acpi_video_get_backlight_type); -/* - * Set the preferred backlight interface type based on DMI info.
- * This function allows DMI blacklists to be implemented by external - * platform drivers instead of putting a big blacklist in video_detect.c - */ -void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) +enum acpi_backlight_type acpi_video_get_backlight_type(void) { - acpi_backlight_dmi = type; - /* Remove acpi-video backlight interface if it is no longer desired */ - if (acpi_video_get_backlight_type() != acpi_backlight_video) - acpi_video_unregister_backlight(); + return __acpi_video_get_backlight_type(false); } -EXPORT_SYMBOL(acpi_video_set_dmi_backlight_type); +EXPORT_SYMBOL(acpi_video_get_backlight_type); -void __exit acpi_video_detect_exit(void) +bool acpi_video_backlight_use_native(void) { - if (backlight_notifier_registered) - backlight_unregister_notifier(&backlight_nb); + return __acpi_video_get_backlight_type(true) == acpi_backlight_native; } +EXPORT_SYMBOL(acpi_video_backlight_use_native); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 066400ed8841..406b4e26f538 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -136,6 +136,10 @@ struct dma_fence *dma_fence_get_stub(void) &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); + + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &dma_fence_stub.flags); + dma_fence_signal_locked(&dma_fence_stub); } spin_unlock(&dma_fence_stub_lock); @@ -161,6 +165,10 @@ struct dma_fence *dma_fence_allocate_private_stub(void) &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); + + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &dma_fence_stub.flags); + dma_fence_signal(fence); return fence; @@ -500,6 +508,8 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) __dma_fence_might_wait(); + dma_fence_enable_sw_signaling(fence); + trace_dma_fence_wait_start(fence); if (fence->ops->wait) ret = fence->ops->wait(fence, intr, timeout); @@ -601,9 +611,6 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return; - spin_lock_irqsave(fence->lock, flags); __dma_fence_enable_signaling(fence); spin_unlock_irqrestore(fence->lock, flags); @@ -756,19 +763,16 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) unsigned long flags; signed long ret = timeout ? 
timeout : 1; - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return ret; - spin_lock_irqsave(fence->lock, flags); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + goto out; + if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } - if (!__dma_fence_enable_signaling(fence)) - goto out; - if (!timeout) { ret = 0; goto out; diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c index 8ce1ea59d31b..0a9b099d0518 100644 --- a/drivers/dma-buf/st-dma-fence-chain.c +++ b/drivers/dma-buf/st-dma-fence-chain.c @@ -87,6 +87,8 @@ static int sanitycheck(void *arg) if (!chain) err = -ENOMEM; + dma_fence_enable_sw_signaling(chain); + dma_fence_signal(f); dma_fence_put(f); @@ -143,6 +145,8 @@ static int fence_chains_init(struct fence_chains *fc, unsigned int count, } fc->tail = fc->chains[i]; + + dma_fence_enable_sw_signaling(fc->chains[i]); } fc->chain_length = i; diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c index 4105d5ea8dde..f0cee984b6c7 100644 --- a/drivers/dma-buf/st-dma-fence-unwrap.c +++ b/drivers/dma-buf/st-dma-fence-unwrap.c @@ -102,6 +102,8 @@ static int sanitycheck(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + array = mock_array(1, f); if (!array) return -ENOMEM; @@ -124,12 +126,16 @@ static int unwrap_array(void *arg) if (!f1) return -ENOMEM; + dma_fence_enable_sw_signaling(f1); + f2 = mock_fence(); if (!f2) { dma_fence_put(f1); return -ENOMEM; } + dma_fence_enable_sw_signaling(f2); + array = mock_array(2, f1, f2); if (!array) return -ENOMEM; @@ -164,12 +170,16 @@ static int unwrap_chain(void *arg) if (!f1) return -ENOMEM; + dma_fence_enable_sw_signaling(f1); + f2 = mock_fence(); if (!f2) { dma_fence_put(f1); return -ENOMEM; } + dma_fence_enable_sw_signaling(f2); + chain = mock_chain(f1, f2); if (!chain) return -ENOMEM; @@ -204,12 +214,16 @@ static int unwrap_chain_array(void *arg) if (!f1) return -ENOMEM; + dma_fence_enable_sw_signaling(f1); + f2 = mock_fence(); if (!f2) { dma_fence_put(f1); return -ENOMEM; } + dma_fence_enable_sw_signaling(f2); + array = mock_array(2, f1, f2); if (!array) return -ENOMEM; @@ -248,12 +262,16 @@ static int unwrap_merge(void *arg) if (!f1) return -ENOMEM; + dma_fence_enable_sw_signaling(f1); + f2 = mock_fence(); if (!f2) { err = -ENOMEM; goto error_put_f1; } + dma_fence_enable_sw_signaling(f2); + f3 = dma_fence_unwrap_merge(f1, f2); if (!f3) { err = -ENOMEM; @@ -296,10 +314,14 @@ static int unwrap_merge_complex(void *arg) if (!f1) return -ENOMEM; + dma_fence_enable_sw_signaling(f1); + f2 = mock_fence(); if (!f2) goto error_put_f1; + dma_fence_enable_sw_signaling(f2); + f3 = dma_fence_unwrap_merge(f1, f2); if (!f3) goto error_put_f2; diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c index c8a12d7ad71a..fb6e0a6ae2c9 100644 --- a/drivers/dma-buf/st-dma-fence.c +++ b/drivers/dma-buf/st-dma-fence.c @@ -102,6 +102,8 @@ static int sanitycheck(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_signal(f); dma_fence_put(f); @@ -117,6 +119,8 @@ static int test_signaling(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_is_signaled(f)) { pr_err("Fence unexpectedly signaled on creation\n"); goto err_free; @@ -190,6 +194,8 @@ static int test_late_add_callback(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_signal(f); if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) { @@ -282,6 +288,8 @@ static 
int test_status(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_get_status(f)) { pr_err("Fence unexpectedly has signaled status on creation\n"); goto err_free; @@ -308,6 +316,8 @@ static int test_error(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_set_error(f, -EIO); if (dma_fence_get_status(f)) { @@ -337,6 +347,8 @@ static int test_wait(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + if (dma_fence_wait_timeout(f, false, 0) != -ETIME) { pr_err("Wait reported complete before being signaled\n"); goto err_free; @@ -379,6 +391,8 @@ static int test_wait_timeout(void *arg) if (!wt.f) return -ENOMEM; + dma_fence_enable_sw_signaling(wt.f); + if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) { pr_err("Wait reported complete before being signaled\n"); goto err_free; @@ -458,6 +472,8 @@ static int thread_signal_callback(void *arg) break; } + dma_fence_enable_sw_signaling(f1); + rcu_assign_pointer(t->fences[t->id], f1); smp_wmb(); diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c index 813779e3c9be..15dbea1462ed 100644 --- a/drivers/dma-buf/st-dma-resv.c +++ b/drivers/dma-buf/st-dma-resv.c @@ -45,6 +45,8 @@ static int sanitycheck(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_fence_signal(f); dma_fence_put(f); @@ -69,6 +71,8 @@ static int test_signaling(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_resv_init(&resv); r = dma_resv_lock(&resv, NULL); if (r) { @@ -114,6 +118,8 @@ static int test_for_each(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_resv_init(&resv); r = dma_resv_lock(&resv, NULL); if (r) { @@ -173,6 +179,8 @@ static int test_for_each_unlocked(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_resv_init(&resv); r = dma_resv_lock(&resv, NULL); if (r) { @@ -244,6 +252,8 @@ static int test_get_fences(void *arg) if (!f) return -ENOMEM; + dma_fence_enable_sw_signaling(f); + dma_resv_init(&resv); r = dma_resv_lock(&resv, NULL); if (r) { diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2f52e8941074..198ba846d34b 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -235,6 +235,13 @@ config DRM_RADEON select HWMON select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE + # radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work + # ACPI_VIDEO's dependencies must also be selected. + select INPUT if ACPI + select ACPI_VIDEO if ACPI + # On x86 ACPI_VIDEO also needs ACPI_WMI + select X86_PLATFORM_DEVICES if ACPI && X86 + select ACPI_WMI if ACPI && X86 help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. You don't need to choose this to @@ -260,6 +267,13 @@ config DRM_AMDGPU select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE select DRM_BUDDY + # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work + # ACPI_VIDEO's dependencies must also be selected. + select INPUT if ACPI + select ACPI_VIDEO if ACPI + # On x86 ACPI_VIDEO also needs ACPI_WMI + select X86_PLATFORM_DEVICES if ACPI && X86 + select ACPI_WMI if ACPI && X86 help Choose this option if you have a recent AMD Radeon graphics card. 
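
The ACPI and Kconfig hunks above, together with the amdgpu hunks that follow, converge on one driver-side pattern: a KMS driver only registers its own RAW/native backlight device when acpi_video_backlight_use_native() agrees, and otherwise hands control back with acpi_video_register_backlight(). A minimal sketch of that pattern, assuming a hypothetical foodrm driver (foodrm_* names, including foodrm_bl_ops, are illustrative; the two acpi_video_* helpers are the ones this series adds):

#include <acpi/video.h>
#include <linux/backlight.h>

static void foodrm_register_backlight(struct foodrm_device *fdev)
{
	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
	};

	/* Ask the shared ACPI video heuristics whether native control is wanted. */
	if (!acpi_video_backlight_use_native()) {
		drm_info(&fdev->drm, "Skipping native backlight registration\n");
		/*
		 * Hand over to acpi_video; without this call the ACPI
		 * backlight would only show up once the 8 second
		 * register_backlight_delay fallback expires.
		 */
		acpi_video_register_backlight();
		return;
	}

	fdev->bl = backlight_device_register("foodrm_bl", fdev->drm.dev,
					     fdev, &foodrm_bl_ops, &props);
	if (IS_ERR(fdev->bl))
		drm_err(&fdev->drm, "failed to register backlight device\n");
}

amdgpu_dm and the atombios encoder code in the next hunks follow exactly this shape, with the extra twist that the atombios path also falls back to the ACPI backlight when the BIOS reports that the backlight is not GPU-controlled.
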
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index d1a2619fa89f..73a517bcf5c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -746,7 +746,7 @@ static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man, (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT); if (place->fpfn < lpfn && - (place->lpfn && place->lpfn > fpfn)) + (!place->lpfn || place->lpfn > fpfn)) return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index fa7421afb9a6..6be9ac2b9c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -26,6 +26,8 @@ #include <linux/pci.h> +#include <acpi/video.h> + #include <drm/drm_crtc_helper.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -182,7 +184,12 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode return; if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return; + goto register_acpi_backlight; + + if (!acpi_video_backlight_use_native()) { + drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n"); + goto register_acpi_backlight; + } pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL); if (!pdata) { @@ -218,6 +225,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode error: kfree(pdata); return; + +register_acpi_backlight: + /* Try registering an ACPI video backlight device instead. */ + acpi_video_register_backlight(); + return; } void diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e53562d9362b..4c73727e0b7d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -90,6 +90,8 @@ #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_plane_helper.h> +#include <acpi/video.h> + #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" @@ -4038,6 +4040,13 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; + if (!acpi_video_backlight_use_native()) { + drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n"); + /* Try registering an ACPI video backlight device instead. 
*/ + acpi_video_register_backlight(); + return; + } + props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h index 6e0be6027705..01a7d66864f2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h @@ -401,8 +401,6 @@ extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); -extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); -extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); extern int phm_set_power_state(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pcurrent_state, const struct pp_hw_power_state *pnew_power_state); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 760b27971557..b9392f31e629 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -39,7 +39,7 @@ #include "ast_drv.h" -int ast_modeset = -1; +static int ast_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, ast_modeset, int, 0400); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 103bd4fa698b..1bc0220e6783 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -113,6 +113,9 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, case 1024: vbios_mode->enh_table = &res_1024x768[refresh_rate_index]; break; + case 1152: + vbios_mode->enh_table = &res_1152x864[refresh_rate_index]; + break; case 1280: if (mode->crtc_vdisplay == 800) vbios_mode->enh_table = &res_1280x800[refresh_rate_index]; @@ -310,7 +313,7 @@ static void ast_set_crtc_reg(struct ast_private *ast, u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; u16 temp, precache = 0; - if ((ast->chip == AST2500) && + if ((ast->chip == AST2500 || ast->chip == AST2600) && (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) precache = 40; @@ -351,6 +354,12 @@ static void ast_set_crtc_reg(struct ast_private *ast, ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD); + // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); + if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080)) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00); + /* vert timings */ temp = (mode->crtc_vtotal) - 2; if (temp & 0x100) @@ -428,7 +437,7 @@ static void ast_set_dclk_reg(struct ast_private *ast, { const struct ast_vbios_dclk_info *clk_info; - if (ast->chip == AST2500) + if ((ast->chip == AST2500) || (ast->chip == AST2600)) clk_info = &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; else clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; @@ -1057,6 +1066,8 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode return MODE_OK; if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) return MODE_OK; + if ((mode->hdisplay == 1152) && (mode->vdisplay == 864)) + return MODE_OK; if ((ast->chip == AST2100) || (ast->chip == AST2200) || (ast->chip == AST2300) || (ast->chip == AST2400) || @@ 
-1089,6 +1100,10 @@ ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode if (mode->vdisplay == 768) status = MODE_OK; break; + case 1152: + if (mode->vdisplay == 864) + status = MODE_OK; + break; case 1280: if (mode->vdisplay == 1024) status = MODE_OK; diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index dbe1cc620f6e..0378c9bc079b 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -272,6 +272,13 @@ static const struct ast_vbios_enhtable res_1600x1200[] = { (SyncPP | Charx8Dot), 0xFF, 1, 0x33 }, }; +static const struct ast_vbios_enhtable res_1152x864[] = { + {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */ + (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B }, + {1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */ + (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B }, +}; + /* 16:9 */ static const struct ast_vbios_enhtable res_1360x768[] = { {1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index c5719908ce2d..ba060277c3fd 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -68,7 +68,6 @@ enum { BYTE_SWAP_GBR = 3, BYTE_SWAP_BRG = 4, BYTE_SWAP_BGR = 5, - BYTE_SWAP_MAX = 6, }; /* Page 0, Register 0x19 */ @@ -356,8 +355,6 @@ static void ch7033_bridge_mode_set(struct drm_bridge *bridge, int hsynclen = mode->hsync_end - mode->hsync_start; int vbporch = mode->vsync_start - mode->vdisplay; int vsynclen = mode->vsync_end - mode->vsync_start; - u8 byte_swap; - int ret; /* * Page 4 @@ -401,16 +398,8 @@ static void ch7033_bridge_mode_set(struct drm_bridge *bridge, regmap_write(priv->regmap, 0x15, vbporch); regmap_write(priv->regmap, 0x16, vsynclen); - /* Input color swap. Byte order is optional and will default to - * BYTE_SWAP_BGR to preserve backwards compatibility with existing - * driver. - */ - ret = of_property_read_u8(priv->bridge.of_node, "chrontel,byteswap", - &byte_swap); - if (!ret && byte_swap < BYTE_SWAP_MAX) - regmap_update_bits(priv->regmap, 0x18, SWAP, byte_swap); - else - regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR); + /* Input color swap. */ + regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR); /* Input clock and sync polarity. 
*/ regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 2bb957cffd94..2767b70fa2cb 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -563,7 +563,7 @@ static void it6505_debug_print(struct it6505 *it6505, unsigned int reg, struct device *dev = &it6505->client->dev; int val; - if (likely(!(__drm_debug & DRM_UT_DRIVER))) + if (!drm_debug_enabled(DRM_UT_DRIVER)) return; val = it6505_read(it6505, reg); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 28bad30dc4e5..3e870d45f881 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -165,30 +165,38 @@ static int lt8912_write_rxlogicres_config(struct lt8912 *lt) return ret; }; +/* enable LVDS output with some hardcoded configuration, not required for the HDMI output */ static int lt8912_write_lvds_config(struct lt8912 *lt) { const struct reg_sequence seq[] = { + // lvds power up {0x44, 0x30}, {0x51, 0x05}, - {0x50, 0x24}, - {0x51, 0x2d}, - {0x52, 0x04}, - {0x69, 0x0e}, + + // core pll bypass + {0x50, 0x24}, // cp=50uA + {0x51, 0x2d}, // Pix_clk as reference, second order passive LPF PLL + {0x52, 0x04}, // loopdiv=0, use second-order PLL + {0x69, 0x0e}, // CP_PRESET_DIV_RATIO {0x69, 0x8e}, {0x6a, 0x00}, - {0x6c, 0xb8}, + {0x6c, 0xb8}, // RGD_CP_SOFT_K_EN,RGD_CP_SOFT_K[13:8] {0x6b, 0x51}, - {0x04, 0xfb}, + + {0x04, 0xfb}, // core pll reset {0x04, 0xff}, - {0x7f, 0x00}, - {0xa8, 0x13}, - {0x02, 0xf7}, + + // scaler bypass + {0x7f, 0x00}, // disable scaler + {0xa8, 0x13}, // 0x13: JEIDA, 0x33: VESA + + {0x02, 0xf7}, // lvds pll reset {0x02, 0xff}, {0x03, 0xcf}, {0x03, 0xff}, }; - return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq)); + return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq)); }; static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b) @@ -268,7 +276,7 @@ static int lt8912_video_setup(struct lt8912 *lt) u32 hactive, h_total, hpw, hfp, hbp; u32 vactive, v_total, vpw, vfp, vbp; u8 settle = 0x08; - int ret; + int ret, hsync_activehigh, vsync_activehigh; if (!lt) return -EINVAL; @@ -278,12 +286,14 @@ static int lt8912_video_setup(struct lt8912 *lt) hpw = lt->mode.hsync_len; hbp = lt->mode.hback_porch; h_total = hactive + hfp + hpw + hbp; + hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH; vactive = lt->mode.vactive; vfp = lt->mode.vfront_porch; vpw = lt->mode.vsync_len; vbp = lt->mode.vback_porch; v_total = vactive + vfp + vpw + vbp; + vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH; if (vactive <= 600) settle = 0x04; @@ -317,6 +327,13 @@ static int lt8912_video_setup(struct lt8912 *lt) ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0), + vsync_activehigh ? BIT(0) : 0); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1), + hsync_activehigh ? BIT(1) : 0); + ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0), + lt->connector.display_info.is_hdmi ? 
BIT(0) : 0); + return ret; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index 7d2ed0ed2fe2..4efb62bcdb63 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -542,8 +542,8 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev) if (ret < 0) return ret; - strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver)); - strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname)); + strscpy(card->driver, DRIVER_NAME, sizeof(card->driver)); + strscpy(card->shortname, "DW-HDMI", sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s rev 0x%02x, irq %d", card->shortname, revision, data->irq); @@ -561,7 +561,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev) dw->pcm = pcm; pcm->private_data = dw; - strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name)); + strscpy(pcm->name, DRIVER_NAME, sizeof(pcm->name)); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops); /* diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 92990a3d577a..9f055d9710ea 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -2670,17 +2670,8 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, struct drm_dp_phy_test_params *data, u8 dp_rev) { int err, i; - u8 link_config[2]; u8 test_pattern; - link_config[0] = drm_dp_link_rate_to_bw_code(data->link_rate); - link_config[1] = data->num_lanes; - if (data->enhanced_frame_cap) - link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, link_config, 2); - if (err < 0) - return err; - test_pattern = data->phy_pattern; if (dp_rev < 0x12) { test_pattern = (test_pattern << 2) & diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c index 81881e81ceae..c3ad4ab2b456 100644 --- a/drivers/gpu/drm/display/drm_scdc_helper.c +++ b/drivers/gpu/drm/display/drm_scdc_helper.c @@ -35,6 +35,19 @@ * HDMI 2.0 specification. It is a point-to-point protocol that allows the * HDMI source and HDMI sink to exchange data. The same I2C interface that * is used to access EDID serves as the transport mechanism for SCDC. + * + * Note: The SCDC status is going to be lost when the display is + * disconnected. This can happen physically when the user disconnects + * the cable, but also when a display is switched on (such as waking up + * a TV). + * + * This is further complicated by the fact that, upon a disconnection / + * reconnection, KMS won't change the mode on its own. This means that + * one can't just rely on setting the SCDC status on enable, but also + * has to track the connector status changes using interrupts and + * restore the SCDC status. The typical solution for this is to trigger an + * empty modeset in drm_connector_helper_funcs.detect_ctx(), like what vc4 does + * in vc4_hdmi_reset_link(). */ #define SCDC_I2C_SLAVE_ADDRESS 0x54 diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c index fdb7d5c17ba1..3b8fdeeafd53 100644 --- a/drivers/gpu/drm/drm_aperture.c +++ b/drivers/gpu/drm/drm_aperture.c @@ -74,7 +74,7 @@ * given framebuffer memory. Ownership of the framebuffer memory is achieved * by calling devm_aperture_acquire_from_firmware(). On success, the driver * is the owner of the framebuffer range. The function fails if the - * framebuffer is already by another driver. See below for an example. 
+ * framebuffer is already owned by another driver. See below for an example. * * .. code-block:: c * @@ -112,7 +112,7 @@ * * The generic driver is now subject to forced removal by other drivers. This * only works for platform drivers that support hot unplug. - * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al + * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al. * for the registered framebuffer range, the aperture helpers call * platform_device_unregister() and the generic driver unloads itself. It * may not access the device's registers, framebuffer memory, ROM, etc @@ -164,7 +164,7 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); * @primary: also kick vga16fb if present * @req_driver: requesting DRM driver * - * This function removes graphics device drivers which use memory range described by + * This function removes graphics device drivers which use the memory range described by * @base and @size. * * Returns: @@ -182,8 +182,8 @@ EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); * @pdev: PCI device * @req_driver: requesting DRM driver * - * This function removes graphics device drivers using memory range configured - * for any of @pdev's memory bars. The function assumes that PCI device with + * This function removes graphics device drivers using the memory range configured + * for any of @pdev's memory bars. The function assumes that a PCI device with * shadowed ROM drives a primary display and so kicks out vga16fb. * * Returns: diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ee5fea48b5cb..98cc3137c062 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -786,7 +786,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_check_modeset); /** - * drm_atomic_helper_check_wb_connector_state() - Check writeback encoder state + * drm_atomic_helper_check_wb_encoder_state() - Check writeback encoder state * @encoder: encoder state to check * @conn_state: connector state to check * diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index 937b699ac2a8..d8b2955e88fd 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -224,6 +224,7 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, const struct drm_plane_state *old_state, const struct drm_plane_state *state) { + struct drm_rect src; memset(iter, 0, sizeof(*iter)); if (!state || !state->crtc || !state->fb || !state->visible) @@ -233,10 +234,12 @@ drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter, iter->num_clips = drm_plane_get_damage_clips_count(state); /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */ - iter->plane_src.x1 = state->src.x1 >> 16; - iter->plane_src.y1 = state->src.y1 >> 16; - iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF); - iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF); + src = drm_plane_state_src(state); + + iter->plane_src.x1 = src.x1 >> 16; + iter->plane_src.y1 = src.y1 >> 16; + iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF); + iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF); if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) { iter->clips = NULL; diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 56642816fdff..e2f76621453c 100644 --- 
a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -553,6 +553,7 @@ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *d drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, drm_fb_xrgb8888_to_xrgb2101010_line); } +EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010); static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels) { @@ -793,3 +794,111 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc kfree(src32); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono); + +static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc) +{ + const uint32_t *fourccs_end = fourccs + nfourccs; + + while (fourccs < fourccs_end) { + if (*fourccs == fourcc) + return true; + ++fourccs; + } + return false; +} + +/** + * drm_fb_build_fourcc_list - Filters a list of supported color formats against + * the device's native formats + * @dev: DRM device + * @native_fourccs: 4CC codes of natively supported color formats + * @native_nfourccs: The number of entries in @native_fourccs + * @driver_fourccs: 4CC codes of all driver-supported color formats + * @driver_nfourccs: The number of entries in @driver_fourccs + * @fourccs_out: Returns 4CC codes of supported color formats + * @nfourccs_out: The number of available entries in @fourccs_out + * + * This function creates a list of supported color formats from the natively + * supported formats and the emulated formats. + * At a minimum, most userspace programs expect at least support for + * XRGB8888 on the primary plane. Devices that have to emulate the + * format, and possibly others, can use drm_fb_build_fourcc_list() to + * create a list of supported color formats. The returned list can + * be handed over to drm_universal_plane_init() et al. Native formats + * will go before emulated formats. Other heuristics might be applied + * to optimize the order. Formats near the beginning of the list are + * usually preferred over formats near the end of the list. + * + * Returns: + * The number of color-format 4CC codes returned in @fourccs_out. + */ +size_t drm_fb_build_fourcc_list(struct drm_device *dev, + const u32 *native_fourccs, size_t native_nfourccs, + const u32 *driver_fourccs, size_t driver_nfourccs, + u32 *fourccs_out, size_t nfourccs_out) +{ + u32 *fourccs = fourccs_out; + const u32 *fourccs_end = fourccs_out + nfourccs_out; + bool found_native = false; + size_t i; + + /* + * The device's native formats go first. + */ + + for (i = 0; i < native_nfourccs; ++i) { + u32 fourcc = native_fourccs[i]; + + if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) { + continue; /* skip duplicate entries */ + } else if (fourccs == fourccs_end) { + drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc); + continue; /* end of available output buffer */ + } + + drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc); + + if (!found_native) + found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc); + *fourccs = fourcc; + ++fourccs; + } + + /* + * The plane's atomic_update helper converts the framebuffer's color format + * to a native format when copying to device memory. + * + * If there is not a single format supported by both device and + * driver, the native formats are likely not supported by the conversion + * helpers. Therefore *only* support the native formats and add a + * conversion helper ASAP.
+ */ + if (!found_native) { + drm_warn(dev, "Format conversion helpers required to add extra formats.\n"); + goto out; + } + + /* + * The extra formats, emulated by the driver, go second. + */ + + for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) { + u32 fourcc = driver_fourccs[i]; + + if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) { + continue; /* skip duplicate and native entries */ + } else if (fourccs == fourccs_end) { + drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc); + continue; /* end of available output buffer */ + } + + drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc); + + *fourccs = fourcc; + ++fourccs; + } + +out: + return fourccs - fourccs_out; +} +EXPORT_SYMBOL(drm_fb_build_fourcc_list); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 185b04762e2c..2dd97473ca10 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -87,13 +87,13 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y, src_x > fb_width - src_w || src_h > fb_height || src_y > fb_height - src_h) { - DRM_DEBUG_KMS("Invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", - src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, - src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, - src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, - src_y >> 16, ((src_y & 0xffff) * 15625) >> 10, - fb->width, fb->height); + drm_dbg_kms(fb->dev, "Invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, + src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, + src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, + src_y >> 16, ((src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); return -ENOSPC; } @@ -125,7 +125,7 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or, r.pixel_format = drm_driver_legacy_fb_format(dev, or->bpp, or->depth); if (r.pixel_format == DRM_FORMAT_INVALID) { - DRM_DEBUG("bad {bpp:%d, depth:%d}\n", or->bpp, or->depth); + drm_dbg_kms(dev, "bad {bpp:%d, depth:%d}\n", or->bpp, or->depth); return -EINVAL; } @@ -177,18 +177,18 @@ static int framebuffer_check(struct drm_device *dev, /* check if the format is supported at all */ if (!__drm_format_info(r->pixel_format)) { - DRM_DEBUG_KMS("bad framebuffer format %p4cc\n", - &r->pixel_format); + drm_dbg_kms(dev, "bad framebuffer format %p4cc\n", + &r->pixel_format); return -EINVAL; } if (r->width == 0) { - DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); + drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width); return -EINVAL; } if (r->height == 0) { - DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); + drm_dbg_kms(dev, "bad framebuffer height %u\n", r->height); return -EINVAL; } @@ -202,12 +202,12 @@ static int framebuffer_check(struct drm_device *dev, u64 min_pitch = drm_format_info_min_pitch(info, i, width); if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) { - DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i); + drm_dbg_kms(dev, "Format requires non-linear modifier for plane %d\n", i); return -EINVAL; } if (!r->handles[i]) { - DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); + drm_dbg_kms(dev, "no buffer object handle for plane %d\n", i); return -EINVAL; } @@ -218,20 +218,20 @@ static int framebuffer_check(struct drm_device *dev, return -ERANGE; if (block_size && r->pitches[i] < min_pitch) { - DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); + drm_dbg_kms(dev, 
"bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { - DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", - r->modifier[i], i); + drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n", + r->modifier[i], i); return -EINVAL; } if (r->flags & DRM_MODE_FB_MODIFIERS && r->modifier[i] != r->modifier[0]) { - DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", - r->modifier[i], i); + drm_dbg_kms(dev, "bad fb modifier %llu for plane %d\n", + r->modifier[i], i); return -EINVAL; } @@ -244,7 +244,7 @@ static int framebuffer_check(struct drm_device *dev, if (r->pixel_format != DRM_FORMAT_NV12 || width % 128 || height % 32 || r->pitches[i] % 128) { - DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); + drm_dbg_kms(dev, "bad modifier data for plane %d\n", i); return -EINVAL; } break; @@ -256,7 +256,7 @@ static int framebuffer_check(struct drm_device *dev, for (i = info->num_planes; i < 4; i++) { if (r->modifier[i]) { - DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i); + drm_dbg_kms(dev, "non-zero modifier for unused plane %d\n", i); return -EINVAL; } @@ -265,17 +265,17 @@ static int framebuffer_check(struct drm_device *dev, continue; if (r->handles[i]) { - DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i); + drm_dbg_kms(dev, "buffer object handle for unused plane %d\n", i); return -EINVAL; } if (r->pitches[i]) { - DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i); + drm_dbg_kms(dev, "non-zero pitch for unused plane %d\n", i); return -EINVAL; } if (r->offsets[i]) { - DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i); + drm_dbg_kms(dev, "non-zero offset for unused plane %d\n", i); return -EINVAL; } } @@ -293,24 +293,24 @@ drm_internal_framebuffer_create(struct drm_device *dev, int ret; if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { - DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); + drm_dbg_kms(dev, "bad framebuffer flags 0x%08x\n", r->flags); return ERR_PTR(-EINVAL); } if ((config->min_width > r->width) || (r->width > config->max_width)) { - DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", - r->width, config->min_width, config->max_width); + drm_dbg_kms(dev, "bad framebuffer width %d, should be >= %d && <= %d\n", + r->width, config->min_width, config->max_width); return ERR_PTR(-EINVAL); } if ((config->min_height > r->height) || (r->height > config->max_height)) { - DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", - r->height, config->min_height, config->max_height); + drm_dbg_kms(dev, "bad framebuffer height %d, should be >= %d && <= %d\n", + r->height, config->min_height, config->max_height); return ERR_PTR(-EINVAL); } if (r->flags & DRM_MODE_FB_MODIFIERS && dev->mode_config.fb_modifiers_not_supported) { - DRM_DEBUG_KMS("driver does not support fb modifiers\n"); + drm_dbg_kms(dev, "driver does not support fb modifiers\n"); return ERR_PTR(-EINVAL); } @@ -320,7 +320,7 @@ drm_internal_framebuffer_create(struct drm_device *dev, fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { - DRM_DEBUG_KMS("could not create framebuffer\n"); + drm_dbg_kms(dev, "could not create framebuffer\n"); return fb; } @@ -356,7 +356,7 @@ int drm_mode_addfb2(struct drm_device *dev, if (IS_ERR(fb)) return PTR_ERR(fb); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + drm_dbg_kms(dev, "[FB:%d]\n", fb->base.id); r->fb_id = fb->base.id; /* Transfer ownership to the filp for reaping on close */ @@ -384,7 +384,7 @@ int 
drm_mode_addfb2_ioctl(struct drm_device *dev, * then. So block it to make userspace fallback to * ADDFB. */ - DRM_DEBUG_KMS("addfb2 broken on bigendian"); + drm_dbg_kms(dev, "addfb2 broken on bigendian"); return -EOPNOTSUPP; } #endif diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 86d670c71286..33f1d66dda60 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -165,6 +165,7 @@ void drm_gem_private_object_init(struct drm_device *dev, obj->resv = &obj->_resv; drm_vma_node_reset(&obj->vma_node); + INIT_LIST_HEAD(&obj->lru_node); } EXPORT_SYMBOL(drm_gem_private_object_init); @@ -951,6 +952,7 @@ drm_gem_object_release(struct drm_gem_object *obj) dma_resv_fini(&obj->_resv); drm_gem_free_mmap_offset(obj); + drm_gem_lru_remove(obj); } EXPORT_SYMBOL(drm_gem_object_release); @@ -1274,3 +1276,171 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, ww_acquire_fini(acquire_ctx); } EXPORT_SYMBOL(drm_gem_unlock_reservations); + +/** + * drm_gem_lru_init - initialize a LRU + * + * @lru: The LRU to initialize + * @lock: The lock protecting the LRU + */ +void +drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock) +{ + lru->lock = lock; + lru->count = 0; + INIT_LIST_HEAD(&lru->list); +} +EXPORT_SYMBOL(drm_gem_lru_init); + +static void +drm_gem_lru_remove_locked(struct drm_gem_object *obj) +{ + obj->lru->count -= obj->size >> PAGE_SHIFT; + WARN_ON(obj->lru->count < 0); + list_del(&obj->lru_node); + obj->lru = NULL; +} + +/** + * drm_gem_lru_remove - remove object from whatever LRU it is in + * + * If the object is currently in any LRU, remove it. + * + * @obj: The GEM object to remove from current LRU + */ +void +drm_gem_lru_remove(struct drm_gem_object *obj) +{ + struct drm_gem_lru *lru = obj->lru; + + if (!lru) + return; + + mutex_lock(lru->lock); + drm_gem_lru_remove_locked(obj); + mutex_unlock(lru->lock); +} +EXPORT_SYMBOL(drm_gem_lru_remove); + +static void +drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj) +{ + lockdep_assert_held_once(lru->lock); + + if (obj->lru) + drm_gem_lru_remove_locked(obj); + + lru->count += obj->size >> PAGE_SHIFT; + list_add_tail(&obj->lru_node, &lru->list); + obj->lru = lru; +} + +/** + * drm_gem_lru_move_tail - move the object to the tail of the LRU + * + * If the object is already in this LRU it will be moved to the + * tail. Otherwise it will be removed from whichever other LRU + * it is in (if any) and moved into this LRU. + * + * @lru: The LRU to move the object into. + * @obj: The GEM object to move into this LRU + */ +void +drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj) +{ + mutex_lock(lru->lock); + drm_gem_lru_move_tail_locked(lru, obj); + mutex_unlock(lru->lock); +} +EXPORT_SYMBOL(drm_gem_lru_move_tail); + +/** + * drm_gem_lru_scan - helper to implement shrinker.scan_objects + * + * If the shrink callback succeeds, it is expected that the driver + * move the object out of this LRU. + * + * If the LRU possibly contain active buffers, it is the responsibility + * of the shrink callback to check for this (ie. dma_resv_test_signaled()) + * or if necessary block until the buffer becomes idle. + * + * @lru: The LRU to scan + * @nr_to_scan: The number of pages to try to reclaim + * @shrink: Callback to try to shrink/reclaim the object. 
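+ *
+ * A minimal usage sketch, assuming a hypothetical driver that feeds idle
+ * buffers into a single LRU with drm_gem_lru_move_tail() and releases
+ * backing pages in a my_purge_pages() helper (all my_* names are
+ * illustrative, not a real API):
+ *
+ *     static bool my_shrink(struct drm_gem_object *obj)
+ *     {
+ *             if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
+ *                     return false;        // buffer may still be active
+ *             my_purge_pages(obj);         // also moves obj off the LRU
+ *             return true;
+ *     }
+ *
+ *     static unsigned long my_scan_objects(struct shrinker *shrinker,
+ *                                          struct shrink_control *sc)
+ *     {
+ *             struct my_device *mydev =
+ *                     container_of(shrinker, struct my_device, shrinker);
+ *
+ *             return drm_gem_lru_scan(&mydev->lru, sc->nr_to_scan,
+ *                                     my_shrink);
+ *     }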
+ */ +unsigned long +drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, + bool (*shrink)(struct drm_gem_object *obj)) +{ + struct drm_gem_lru still_in_lru; + struct drm_gem_object *obj; + unsigned freed = 0; + + drm_gem_lru_init(&still_in_lru, lru->lock); + + mutex_lock(lru->lock); + + while (freed < nr_to_scan) { + obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node); + + if (!obj) + break; + + drm_gem_lru_move_tail_locked(&still_in_lru, obj); + + /* + * If it's in the process of being freed, gem_object->free() + * may be blocked on lock waiting to remove it. So just + * skip it. + */ + if (!kref_get_unless_zero(&obj->refcount)) + continue; + + /* + * Now that we own a reference, we can drop the lock for the + * rest of the loop body, to reduce contention with other + * code paths that need the LRU lock + */ + mutex_unlock(lru->lock); + + /* + * Note that this still needs to be trylock, since we can + * hit shrinker in response to trying to get backing pages + * for this obj (ie. while it's lock is already held) + */ + if (!dma_resv_trylock(obj->resv)) + goto tail; + + if (shrink(obj)) { + freed += obj->size >> PAGE_SHIFT; + + /* + * If we succeeded in releasing the object's backing + * pages, we expect the driver to have moved the object + * out of this LRU + */ + WARN_ON(obj->lru == &still_in_lru); + WARN_ON(obj->lru == lru); + } + + dma_resv_unlock(obj->resv); + +tail: + drm_gem_object_put(obj); + mutex_lock(lru->lock); + } + + /* + * Move objects we've skipped over out of the temporary still_in_lru + * back into this LRU + */ + list_for_each_entry (obj, &still_in_lru.list, lru_node) + obj->lru = lru; + list_splice_tail(&still_in_lru.list, &lru->list); + lru->count += still_in_lru.count; + + mutex_unlock(lru->lock); + + return freed; +} +EXPORT_SYMBOL(drm_gem_lru_scan); diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 84abc3920b57..a6ac56580876 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -310,6 +310,24 @@ err_drm_dev_exit: } /** + * mipi_dbi_pipe_mode_valid - MIPI DBI mode-valid helper + * @pipe: Simple display pipe + * @mode: The mode to test + * + * This function validates a given display mode against the MIPI DBI's hardware + * display. Drivers can use this as their &drm_simple_display_pipe_funcs->mode_valid + * callback. 
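+ *
+ * A typical wiring, sketched for a hypothetical panel driver (the
+ * my_pipe_enable hook is a placeholder; the other helpers are provided
+ * by this file):
+ *
+ *     static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
+ *             .mode_valid = mipi_dbi_pipe_mode_valid,
+ *             .enable = my_pipe_enable,
+ *             .disable = mipi_dbi_pipe_disable,
+ *             .update = mipi_dbi_pipe_update,
+ *     };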
+ */ +enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) +{ + struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); + + return drm_crtc_helper_mode_valid_fixed(&pipe->crtc, mode, &dbidev->mode); +} +EXPORT_SYMBOL(mipi_dbi_pipe_mode_valid); + +/** * mipi_dbi_pipe_update - Display pipe update helper * @pipe: Simple display pipe * @old_state: Old plane state @@ -415,26 +433,8 @@ EXPORT_SYMBOL(mipi_dbi_pipe_disable); static int mipi_dbi_connector_get_modes(struct drm_connector *connector) { struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev); - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &dbidev->mode); - if (!mode) { - DRM_ERROR("Failed to duplicate mode\n"); - return 0; - } - - if (mode->name[0] == '\0') - drm_mode_set_name(mode); - - mode->type |= DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - - if (mode->width_mm) { - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - } - return 1; + return drm_connector_helper_get_modes_fixed(connector, &dbidev->mode); } static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = { diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index bd609a978848..f858dfedf2cf 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -100,8 +100,7 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); * This is the minimal list of formats that seem to be safe for modeset use * with all current DRM drivers. Most hardware can actually support more * formats than this and drivers may specify a more accurate list when - * creating the primary plane. However drivers that still call - * drm_plane_init() will use this minimal format list as the default. + * creating the primary plane. */ static const uint32_t safe_modeset_formats[] = { DRM_FORMAT_XRGB8888, @@ -109,43 +108,9 @@ static const uint32_t safe_modeset_formats[] = { }; static const struct drm_plane_funcs primary_plane_funcs = { - .update_plane = drm_plane_helper_update_primary, - .disable_plane = drm_plane_helper_disable_primary, - .destroy = drm_plane_helper_destroy, + DRM_PLANE_NON_ATOMIC_FUNCS, }; -static struct drm_plane *create_primary_plane(struct drm_device *dev) -{ - struct drm_plane *primary; - int ret; - - primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (primary == NULL) { - DRM_DEBUG_KMS("Failed to allocate primary plane\n"); - return NULL; - } - - /* - * Remove the format_default field from drm_plane when dropping - * this helper. 
- */ - primary->format_default = true; - - /* possible_crtc's will be filled in later by crtc_init */ - ret = drm_universal_plane_init(dev, primary, 0, - &primary_plane_funcs, - safe_modeset_formats, - ARRAY_SIZE(safe_modeset_formats), - NULL, - DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) { - kfree(primary); - primary = NULL; - } - - return primary; -} - /** * drm_crtc_init - Legacy CRTC initialization function * @dev: DRM device @@ -177,10 +142,33 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs) { struct drm_plane *primary; + int ret; + + /* possible_crtc's will be filled in later by crtc_init */ + primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0, + &primary_plane_funcs, + safe_modeset_formats, + ARRAY_SIZE(safe_modeset_formats), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (IS_ERR(primary)) + return PTR_ERR(primary); - primary = create_primary_plane(dev); - return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, - NULL); + /* + * Remove the format_default field from drm_plane when dropping + * this helper. + */ + primary->format_default = true; + + ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, NULL); + if (ret) + goto err_drm_plane_cleanup; + + return 0; + +err_drm_plane_cleanup: + drm_plane_cleanup(primary); + kfree(primary); + return ret; } EXPORT_SYMBOL(drm_crtc_init); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index fc1728d46ac2..8a0c0e0bb5bd 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -103,6 +103,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = { + .width = 1080, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { .width = 1200, .height = 1920, @@ -128,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Anbernic Win600 */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Win600"), + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* Asus T100HA */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), @@ -152,6 +164,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO AIR */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "AIR"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* AYA NEO NEXT */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 726f2f163c26..33357629a7f5 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -448,6 +448,44 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size, } EXPORT_SYMBOL(__drmm_universal_plane_alloc); +void *__drm_universal_plane_alloc(struct drm_device *dev, size_t size, + size_t offset, uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t 
*format_modifiers, + enum drm_plane_type type, + const char *name, ...) +{ + void *container; + struct drm_plane *plane; + va_list ap; + int ret; + + if (drm_WARN_ON(dev, !funcs)) + return ERR_PTR(-EINVAL); + + container = kzalloc(size, GFP_KERNEL); + if (!container) + return ERR_PTR(-ENOMEM); + + plane = container + offset; + + va_start(ap, name); + ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, + formats, format_count, format_modifiers, + type, name, ap); + va_end(ap); + if (ret) + goto err_kfree; + + return container; + +err_kfree: + kfree(container); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(__drm_universal_plane_alloc); + int drm_plane_register_all(struct drm_device *dev) { unsigned int num_planes = 0; @@ -483,38 +521,6 @@ void drm_plane_unregister_all(struct drm_device *dev) } /** - * drm_plane_init - Initialize a legacy plane - * @dev: DRM device - * @plane: plane object to init - * @possible_crtcs: bitmask of possible CRTCs - * @funcs: callbacks for the new plane - * @formats: array of supported formats (DRM_FORMAT\_\*) - * @format_count: number of elements in @formats - * @is_primary: plane type (primary vs overlay) - * - * Legacy API to initialize a DRM plane. - * - * New drivers should call drm_universal_plane_init() instead. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, - uint32_t possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, unsigned int format_count, - bool is_primary) -{ - enum drm_plane_type type; - - type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - return drm_universal_plane_init(dev, plane, possible_crtcs, funcs, - formats, format_count, - NULL, type, NULL); -} -EXPORT_SYMBOL(drm_plane_init); - -/** * drm_plane_cleanup - Clean up the core plane usage * @plane: plane to cleanup * diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index c7785967f5bf..865bd999b187 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -30,8 +30,10 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> +#include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include <drm/drm_rect.h> #define SUBPIXEL_MASK 0xffff @@ -195,10 +197,14 @@ int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *cr .x2 = crtc_x + crtc_w, .y2 = crtc_y + crtc_h, }; + struct drm_device *dev = plane->dev; struct drm_connector **connector_list; int num_connectors, ret; bool visible; + if (drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev))) + return -EINVAL; + ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, DRM_MODE_ROTATE_0, @@ -260,6 +266,10 @@ EXPORT_SYMBOL(drm_plane_helper_update_primary); int drm_plane_helper_disable_primary(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx) { + struct drm_device *dev = plane->dev; + + drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)); + return -EINVAL; } EXPORT_SYMBOL(drm_plane_helper_disable_primary); @@ -278,3 +288,33 @@ void drm_plane_helper_destroy(struct drm_plane *plane) kfree(plane); } EXPORT_SYMBOL(drm_plane_helper_destroy); + +/** + * drm_plane_helper_atomic_check() - Helper to check plane atomic-state + * @plane: plane to check + * @state: atomic state object + * + * Provides a default plane-state check handler for planes whose atomic-state + * scale and positioning are not 
expected to change since the plane is always + * a fullscreen scanout buffer. + * + * This is often the case for the primary plane of simple framebuffers. + * + * RETURNS: + * Zero on success, or an errno code otherwise. + */ +int drm_plane_helper_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); +} +EXPORT_SYMBOL(drm_plane_helper_atomic_check); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index bb427c5a4f1f..69b0b2b9cc1c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -1015,6 +1015,30 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) EXPORT_SYMBOL(drm_helper_hpd_irq_event); /** + * drm_crtc_helper_mode_valid_fixed - Validates a display mode + * @crtc: the crtc + * @mode: the mode to validate + * @fixed_mode: the display hardware's mode + * + * Returns: + * MODE_OK on success, or another mode-status code otherwise. + */ +enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + const struct drm_display_mode *fixed_mode) +{ + if (mode->hdisplay != fixed_mode->hdisplay && mode->vdisplay != fixed_mode->vdisplay) + return MODE_ONE_SIZE; + else if (mode->hdisplay != fixed_mode->hdisplay) + return MODE_ONE_WIDTH; + else if (mode->vdisplay != fixed_mode->vdisplay) + return MODE_ONE_HEIGHT; + + return MODE_OK; +} +EXPORT_SYMBOL(drm_crtc_helper_mode_valid_fixed); + +/** * drm_connector_helper_get_modes_from_ddc - Updates the connector's EDID * property from the connector's * DDC channel @@ -1051,6 +1075,46 @@ int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector) EXPORT_SYMBOL(drm_connector_helper_get_modes_from_ddc); /** + * drm_connector_helper_get_modes_fixed - Duplicates a display mode for a connector + * @connector: the connector + * @fixed_mode: the display hardware's mode + * + * This function duplicates a display modes for a connector. Drivers for hardware + * that only supports a single fixed mode can use this function in their connector's + * get_modes helper. + * + * Returns: + * The number of created modes. + */ +int drm_connector_helper_get_modes_fixed(struct drm_connector *connector, + const struct drm_display_mode *fixed_mode) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(dev, fixed_mode); + if (!mode) { + drm_err(dev, "Failed to duplicate mode " DRM_MODE_FMT "\n", + DRM_MODE_ARG(fixed_mode)); + return 0; + } + + if (mode->name[0] == '\0') + drm_mode_set_name(mode); + + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + if (mode->width_mm) + connector->display_info.width_mm = mode->width_mm; + if (mode->height_mm) + connector->display_info.height_mm = mode->height_mm; + + return 1; +} +EXPORT_SYMBOL(drm_connector_helper_get_modes_fixed); + +/** * drm_connector_helper_get_modes - Read EDID and update connector. 
* @connector: The connector * diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 10b0036f8a2e..b7c11bdce2c8 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -893,7 +893,7 @@ static int hdmi_get_modes(struct drm_connector *connector) if (!edid) return -ENODEV; - hdata->dvi_mode = !drm_detect_hdmi_monitor(edid); + hdata->dvi_mode = !connector->display_info.is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), edid->width_cm, edid->height_cm); @@ -922,8 +922,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) return -EINVAL; } -static int hdmi_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status hdmi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct hdmi_context *hdata = connector_to_hdmi(connector); int ret; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 65260a658684..8d333db813b7 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1045,7 +1045,7 @@ static void mixer_atomic_disable(struct exynos_drm_crtc *crtc) clear_bit(MXR_BIT_POWERED, &ctx->flags); } -static int mixer_mode_valid(struct exynos_drm_crtc *crtc, +static enum drm_mode_status mixer_mode_valid(struct exynos_drm_crtc *crtc, const struct drm_display_mode *mode) { struct mixer_context *ctx = crtc->ctx; diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 0cff20265f97..807b989e3c77 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -7,6 +7,8 @@ config DRM_GMA500 select ACPI_VIDEO if ACPI select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI + select X86_PLATFORM_DEVICES if ACPI + select ACPI_WMI if ACPI help Say yes for an experimental 2D KMS framebuffer driver for the Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c index 46b9c0f13d6d..577a4987b193 100644 --- a/drivers/gpu/drm/gma500/backlight.c +++ b/drivers/gpu/drm/gma500/backlight.c @@ -7,75 +7,109 @@ * Authors: Eric Knopp */ +#include <acpi/video.h> + #include "psb_drv.h" #include "psb_intel_reg.h" #include "psb_intel_drv.h" #include "intel_bios.h" #include "power.h" -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE -static void do_gma_backlight_set(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - backlight_update_status(dev_priv->backlight_device); -} -#endif - void gma_backlight_enable(struct drm_device *dev) { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + dev_priv->backlight_enabled = true; - if (dev_priv->backlight_device) { - dev_priv->backlight_device->props.brightness = dev_priv->backlight_level; - do_gma_backlight_set(dev); - } -#endif + dev_priv->ops->backlight_set(dev, dev_priv->backlight_level); } void gma_backlight_disable(struct drm_device *dev) { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + dev_priv->backlight_enabled = false; - if (dev_priv->backlight_device) { - dev_priv->backlight_device->props.brightness = 0; - do_gma_backlight_set(dev); - } -#endif + dev_priv->ops->backlight_set(dev, 0); } void gma_backlight_set(struct drm_device *dev, int v) { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct 
drm_psb_private *dev_priv = to_drm_psb_private(dev); + dev_priv->backlight_level = v; - if (dev_priv->backlight_device && dev_priv->backlight_enabled) { - dev_priv->backlight_device->props.brightness = v; - do_gma_backlight_set(dev); - } -#endif + if (dev_priv->backlight_enabled) + dev_priv->ops->backlight_set(dev, v); +} + +static int gma_backlight_get_brightness(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + + if (dev_priv->ops->backlight_get) + return dev_priv->ops->backlight_get(dev); + + return dev_priv->backlight_level; } +static int gma_backlight_update_status(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + int level = backlight_get_brightness(bd); + + /* Percentage 1-100% being valid */ + if (level < 1) + level = 1; + + gma_backlight_set(dev, level); + return 0; +} + +static const struct backlight_ops gma_backlight_ops __maybe_unused = { + .get_brightness = gma_backlight_get_brightness, + .update_status = gma_backlight_update_status, +}; + int gma_backlight_init(struct drm_device *dev) { -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct backlight_properties props __maybe_unused = {}; + int ret; + dev_priv->backlight_enabled = true; - return dev_priv->ops->backlight_init(dev); -#else - return 0; + dev_priv->backlight_level = 100; + + ret = dev_priv->ops->backlight_init(dev); + if (ret) + return ret; + + if (!acpi_video_backlight_use_native()) { + drm_info(dev, "Skipping %s backlight registration\n", + dev_priv->ops->backlight_name); + return 0; + } + +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + props.brightness = dev_priv->backlight_level; + props.max_brightness = PSB_MAX_BRIGHTNESS; + props.type = BACKLIGHT_RAW; + + dev_priv->backlight_device = + backlight_device_register(dev_priv->ops->backlight_name, + dev->dev, dev, + &gma_backlight_ops, &props); + if (IS_ERR(dev_priv->backlight_device)) + return PTR_ERR(dev_priv->backlight_device); #endif + + return 0; } void gma_backlight_exit(struct drm_device *dev) { #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - if (dev_priv->backlight_device) { - dev_priv->backlight_device->props.brightness = 0; - backlight_update_status(dev_priv->backlight_device); + + if (dev_priv->backlight_device) backlight_device_unregister(dev_priv->backlight_device); - } #endif } diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index dd32b484dd82..3065596257e9 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -5,7 +5,6 @@ * **************************************************************************/ -#include <linux/backlight.h> #include <linux/delay.h> #include <drm/drm.h> @@ -62,14 +61,10 @@ static int cdv_output_init(struct drm_device *dev) return 0; } -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - /* * Cedartrail Backlght Interfaces */ -static struct backlight_device *cdv_backlight_device; - static int cdv_backlight_combination_mode(struct drm_device *dev) { return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE; @@ -92,9 +87,8 @@ static u32 cdv_get_max_backlight(struct drm_device *dev) return max; } -static int cdv_get_brightness(struct backlight_device *bd) +static int cdv_get_brightness(struct drm_device *dev) { - struct drm_device *dev = bl_get_data(bd); struct pci_dev *pdev = to_pci_dev(dev->dev); u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; @@ -106,20 
+100,13 @@ static int cdv_get_brightness(struct backlight_device *bd) val *= lbpc; } return (val * 100)/cdv_get_max_backlight(dev); - } -static int cdv_set_brightness(struct backlight_device *bd) +static void cdv_set_brightness(struct drm_device *dev, int level) { - struct drm_device *dev = bl_get_data(bd); struct pci_dev *pdev = to_pci_dev(dev->dev); - int level = bd->props.brightness; u32 blc_pwm_ctl; - /* Percentage 1-100% being valid */ - if (level < 1) - level = 1; - level *= cdv_get_max_backlight(dev); level /= 100; @@ -136,38 +123,18 @@ static int cdv_set_brightness(struct backlight_device *bd) blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; REG_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); - return 0; } -static const struct backlight_ops cdv_ops = { - .get_brightness = cdv_get_brightness, - .update_status = cdv_set_brightness, -}; - static int cdv_backlight_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct backlight_properties props; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.max_brightness = 100; - props.type = BACKLIGHT_PLATFORM; - - cdv_backlight_device = backlight_device_register("psb-bl", - NULL, (void *)dev, &cdv_ops, &props); - if (IS_ERR(cdv_backlight_device)) - return PTR_ERR(cdv_backlight_device); - - cdv_backlight_device->props.brightness = - cdv_get_brightness(cdv_backlight_device); - backlight_update_status(cdv_backlight_device); - dev_priv->backlight_device = cdv_backlight_device; - dev_priv->backlight_enabled = true; + + dev_priv->backlight_level = cdv_get_brightness(dev); + cdv_set_brightness(dev, dev_priv->backlight_level); + return 0; } -#endif - /* * Provide the Cedarview specific chip logic and low level methods * for power management @@ -581,11 +548,9 @@ static const struct psb_offset cdv_regmap[2] = { static int cdv_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func); - if (pci_enable_msi(pdev)) - dev_warn(dev->dev, "Enabling MSI failed!\n"); + dev_priv->use_msi = true; dev_priv->regmap = cdv_regmap; gma_get_core_freq(dev); psb_intel_opregion_init(dev); @@ -615,9 +580,10 @@ const struct psb_ops cdv_chip_ops = { .hotplug = cdv_hotplug_event, .hotplug_enable = cdv_hotplug_enable, -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE .backlight_init = cdv_backlight_init, -#endif + .backlight_get = cdv_get_brightness, + .backlight_set = cdv_set_brightness, + .backlight_name = "psb-bl", .init_pm = cdv_init_pm, .save_regs = cdv_save_display_registers, diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index bd40c040a2c9..b0ea911b27de 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -552,28 +552,11 @@ int gma_crtc_page_flip(struct drm_crtc *crtc, return ret; } -int gma_crtc_set_config(struct drm_mode_set *set, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_device *dev = set->crtc->dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - int ret; - - if (!dev_priv->rpm_enabled) - return drm_crtc_helper_set_config(set, ctx); - - pm_runtime_forbid(dev->dev); - ret = drm_crtc_helper_set_config(set, ctx); - pm_runtime_allow(dev->dev); - - return ret; -} - const struct drm_crtc_funcs gma_crtc_funcs = { .cursor_set = gma_crtc_cursor_set, .cursor_move = gma_crtc_cursor_move, .gamma_set = gma_crtc_gamma_set, - .set_config 
= gma_crtc_set_config, + .set_config = drm_crtc_helper_set_config, .destroy = gma_crtc_destroy, .page_flip = gma_crtc_page_flip, .enable_vblank = gma_crtc_enable_vblank, diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index 113cf048105e..c8b611a2f6c6 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -69,8 +69,6 @@ extern int gma_crtc_page_flip(struct drm_crtc *crtc, struct drm_pending_vblank_event *event, uint32_t page_flip_flags, struct drm_modeset_acquire_ctx *ctx); -extern int gma_crtc_set_config(struct drm_mode_set *set, - struct drm_modeset_acquire_ctx *ctx); extern void gma_crtc_save(struct drm_crtc *crtc); extern void gma_crtc_restore(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 5923a9c89312..2531959d3d77 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -5,7 +5,6 @@ * **************************************************************************/ -#include <linux/backlight.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/module.h> @@ -37,29 +36,18 @@ static int oaktrail_output_init(struct drm_device *dev) * Provide the low level interfaces for the Moorestown backlight */ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF #define BLC_PWM_PRECISION_FACTOR 100 /* 10000000 */ #define BLC_PWM_FREQ_CALC_CONSTANT 32 #define MHz 1000000 #define BLC_ADJUSTMENT_MAX 100 -static struct backlight_device *oaktrail_backlight_device; -static int oaktrail_brightness; - -static int oaktrail_set_brightness(struct backlight_device *bd) +static void oaktrail_set_brightness(struct drm_device *dev, int level) { - struct drm_device *dev = bl_get_data(oaktrail_backlight_device); struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - int level = bd->props.brightness; u32 blc_pwm_ctl; u32 max_pwm_blc; - /* Percentage 1-100% being valid */ - if (level < 1) - level = 1; - if (gma_power_begin(dev, 0)) { /* Calculate and set the brightness value */ max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16; @@ -82,19 +70,9 @@ static int oaktrail_set_brightness(struct backlight_device *bd) REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl); gma_power_end(dev); } - oaktrail_brightness = level; - return 0; -} - -static int oaktrail_get_brightness(struct backlight_device *bd) -{ - /* return locally cached var instead of HW read (due to DPST etc.) 
*/ - /* FIXME: ideally return actual value in case firmware fiddled with - it */ - return oaktrail_brightness; } -static int device_backlight_init(struct drm_device *dev) +static int oaktrail_backlight_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); unsigned long core_clock; @@ -123,44 +101,11 @@ static int device_backlight_init(struct drm_device *dev) REG_WRITE(BLC_PWM_CTL, value | (value << 16)); gma_power_end(dev); } - return 0; -} - -static const struct backlight_ops oaktrail_ops = { - .get_brightness = oaktrail_get_brightness, - .update_status = oaktrail_set_brightness, -}; -static int oaktrail_backlight_init(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - int ret; - struct backlight_properties props; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.max_brightness = 100; - props.type = BACKLIGHT_PLATFORM; - - oaktrail_backlight_device = backlight_device_register("oaktrail-bl", - NULL, (void *)dev, &oaktrail_ops, &props); - - if (IS_ERR(oaktrail_backlight_device)) - return PTR_ERR(oaktrail_backlight_device); - - ret = device_backlight_init(dev); - if (ret < 0) { - backlight_device_unregister(oaktrail_backlight_device); - return ret; - } - oaktrail_backlight_device->props.brightness = 100; - oaktrail_backlight_device->props.max_brightness = 100; - backlight_update_status(oaktrail_backlight_device); - dev_priv->backlight_device = oaktrail_backlight_device; + oaktrail_set_brightness(dev, PSB_MAX_BRIGHTNESS); return 0; } -#endif - /* * Provide the Moorestown specific chip logic and low level methods * for power management @@ -501,12 +446,9 @@ static const struct psb_offset oaktrail_regmap[2] = { static int oaktrail_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - if (pci_enable_msi(pdev)) - dev_warn(dev->dev, "Enabling MSI failed!\n"); - + dev_priv->use_msi = true; dev_priv->regmap = oaktrail_regmap; ret = mid_chip_setup(dev); @@ -548,9 +490,9 @@ const struct psb_ops oaktrail_chip_ops = { .output_init = oaktrail_output_init, -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE .backlight_init = oaktrail_backlight_init, -#endif + .backlight_set = oaktrail_set_brightness, + .backlight_name = "oaktrail-bl", .save_regs = oaktrail_save_display_registers, .restore_regs = oaktrail_restore_display_registers, diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 4d98df189e10..75b4eb1c8884 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -61,7 +61,6 @@ static void oaktrail_lvds_set_power(struct drm_device *dev, pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); dev_priv->is_lvds_on = false; - pm_request_idle(dev->dev); } gma_power_end(dev); } diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c index dc494df71a48..0c271072af63 100644 --- a/drivers/gpu/drm/gma500/opregion.c +++ b/drivers/gpu/drm/gma500/opregion.c @@ -150,21 +150,17 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct opregion_asle *asle = dev_priv->opregion.asle; - struct backlight_device *bd = dev_priv->backlight_device; DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp); if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; - if (bd == NULL) - return ASLE_BACKLIGHT_FAILED; - bclp &= ASLE_BCLP_MSK; if (bclp > 255) 
return ASLE_BACKLIGHT_FAILED; - gma_backlight_set(dev, bclp * bd->props.max_brightness / 255); + gma_backlight_set(dev, bclp * PSB_MAX_BRIGHTNESS / 255); asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c index b91de6d36e41..186af29bea6f 100644 --- a/drivers/gpu/drm/gma500/power.c +++ b/drivers/gpu/drm/gma500/power.c @@ -37,9 +37,6 @@ #include <linux/mutex.h> #include <linux/pm_runtime.h> -static struct mutex power_mutex; /* Serialize power ops */ -static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */ - /** * gma_power_init - initialise power manager * @dev: our device @@ -54,13 +51,23 @@ void gma_power_init(struct drm_device *dev) dev_priv->apm_base = dev_priv->apm_reg & 0xffff; dev_priv->ospm_base &= 0xffff; - dev_priv->display_power = true; /* We start active */ - dev_priv->display_count = 0; /* Currently no users */ - dev_priv->suspended = false; /* And not suspended */ - mutex_init(&power_mutex); - if (dev_priv->ops->init_pm) dev_priv->ops->init_pm(dev); + + /* + * Runtime pm support is broken atm. So for now unconditionally + * call pm_runtime_get() here and put it again in psb_driver_unload() + * + * To fix this we need to call pm_runtime_get() once for each active + * pipe at boot and then put() / get() for each pipe disable / enable + * so that the device gets runtime suspended when no pipes are active. + * Once this is in place the pm_runtime_get() below should be replaced + * by a pm_runtime_allow() call to undo the pm_runtime_forbid() from + * pci_pm_init(). + */ + pm_runtime_get(dev->dev); + + dev_priv->pm_initialized = true; } /** @@ -71,8 +78,12 @@ void gma_power_init(struct drm_device *dev) */ void gma_power_uninit(struct drm_device *dev) { - pm_runtime_disable(dev->dev); - pm_runtime_set_suspended(dev->dev); + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + + if (!dev_priv->pm_initialized) + return; + + pm_runtime_put_noidle(dev->dev); } /** @@ -85,11 +96,8 @@ static void gma_suspend_display(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - if (dev_priv->suspended) - return; dev_priv->ops->save_regs(dev); dev_priv->ops->power_down(dev); - dev_priv->display_power = false; } /** @@ -106,8 +114,6 @@ static void gma_resume_display(struct pci_dev *pdev) /* turn on the display power island */ dev_priv->ops->power_up(dev); - dev_priv->suspended = false; - dev_priv->display_power = true; PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL); pci_write_config_word(pdev, PSB_GMCH_CTRL, @@ -131,21 +137,14 @@ static void gma_suspend_pci(struct pci_dev *pdev) struct drm_psb_private *dev_priv = to_drm_psb_private(dev); int bsm, vbt; - if (dev_priv->suspended) - return; - pci_save_state(pdev); pci_read_config_dword(pdev, 0x5C, &bsm); dev_priv->regs.saveBSM = bsm; pci_read_config_dword(pdev, 0xFC, &vbt); dev_priv->regs.saveVBT = vbt; - pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr); - pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); - - dev_priv->suspended = true; } /** @@ -155,29 +154,17 @@ static void gma_suspend_pci(struct pci_dev *pdev) * Perform the resume processing on our PCI device state - rewrite * register state and re-enable the PCI device */ -static bool gma_resume_pci(struct pci_dev *pdev) +static int gma_resume_pci(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); struct 
drm_psb_private *dev_priv = to_drm_psb_private(dev); - int ret; - - if (!dev_priv->suspended) - return true; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM); pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT); - /* restoring MSI address and data in PCIx space */ - pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr); - pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data); - ret = pci_enable_device(pdev); - if (ret != 0) - dev_err(&pdev->dev, "pci_enable failed: %d\n", ret); - else - dev_priv->suspended = false; - return !dev_priv->suspended; + return pci_enable_device(pdev); } /** @@ -192,20 +179,10 @@ int gma_power_suspend(struct device *_dev) { struct pci_dev *pdev = to_pci_dev(_dev); struct drm_device *dev = pci_get_drvdata(pdev); - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - mutex_lock(&power_mutex); - if (!dev_priv->suspended) { - if (dev_priv->display_count) { - mutex_unlock(&power_mutex); - dev_err(dev->dev, "GPU hardware busy, cannot suspend\n"); - return -EBUSY; - } - gma_irq_uninstall(dev); - gma_suspend_display(dev); - gma_suspend_pci(pdev); - } - mutex_unlock(&power_mutex); + gma_irq_uninstall(dev); + gma_suspend_display(dev); + gma_suspend_pci(pdev); return 0; } @@ -220,28 +197,13 @@ int gma_power_resume(struct device *_dev) struct pci_dev *pdev = to_pci_dev(_dev); struct drm_device *dev = pci_get_drvdata(pdev); - mutex_lock(&power_mutex); gma_resume_pci(pdev); gma_resume_display(pdev); - gma_irq_preinstall(dev); - gma_irq_postinstall(dev); - mutex_unlock(&power_mutex); + gma_irq_install(dev); return 0; } /** - * gma_power_is_on - returne true if power is on - * @dev: our DRM device - * - * Returns true if the display island power is on at this moment - */ -bool gma_power_is_on(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - return dev_priv->display_power; -} - -/** * gma_power_begin - begin requiring power * @dev: our DRM device * @force_on: true to force power on @@ -251,35 +213,10 @@ bool gma_power_is_on(struct drm_device *dev) */ bool gma_power_begin(struct drm_device *dev, bool force_on) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct pci_dev *pdev = to_pci_dev(dev->dev); - int ret; - unsigned long flags; - - spin_lock_irqsave(&power_ctrl_lock, flags); - /* Power already on ? 
*/ - if (dev_priv->display_power) { - dev_priv->display_count++; - pm_runtime_get(dev->dev); - spin_unlock_irqrestore(&power_ctrl_lock, flags); - return true; - } - if (force_on == false) - goto out_false; - - /* Ok power up needed */ - ret = gma_resume_pci(pdev); - if (ret == 0) { - gma_irq_preinstall(dev); - gma_irq_postinstall(dev); - pm_runtime_get(dev->dev); - dev_priv->display_count++; - spin_unlock_irqrestore(&power_ctrl_lock, flags); - return true; - } -out_false: - spin_unlock_irqrestore(&power_ctrl_lock, flags); - return false; + if (force_on) + return pm_runtime_resume_and_get(dev->dev) == 0; + else + return pm_runtime_get_if_in_use(dev->dev) == 1; } /** @@ -291,46 +228,5 @@ out_false: */ void gma_power_end(struct drm_device *dev) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - unsigned long flags; - spin_lock_irqsave(&power_ctrl_lock, flags); - dev_priv->display_count--; - WARN_ON(dev_priv->display_count < 0); - spin_unlock_irqrestore(&power_ctrl_lock, flags); pm_runtime_put(dev->dev); } - -int psb_runtime_suspend(struct device *dev) -{ - return gma_power_suspend(dev); -} - -int psb_runtime_resume(struct device *dev) -{ - return gma_power_resume(dev); -} - -int psb_runtime_idle(struct device *dev) -{ - struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev)); - struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev); - if (dev_priv->display_count) - return 0; - else - return 1; -} - -int gma_power_thaw(struct device *_dev) -{ - return gma_power_resume(_dev); -} - -int gma_power_freeze(struct device *_dev) -{ - return gma_power_suspend(_dev); -} - -int gma_power_restore(struct device *_dev) -{ - return gma_power_resume(_dev); -} diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h index 0c89c4d6ec20..063328d66652 100644 --- a/drivers/gpu/drm/gma500/power.h +++ b/drivers/gpu/drm/gma500/power.h @@ -43,9 +43,6 @@ void gma_power_uninit(struct drm_device *dev); */ int gma_power_suspend(struct device *dev); int gma_power_resume(struct device *dev); -int gma_power_thaw(struct device *dev); -int gma_power_freeze(struct device *dev); -int gma_power_restore(struct device *_dev); /* * These are the functions the driver should use to wrap all hw access @@ -54,19 +51,4 @@ int gma_power_restore(struct device *_dev); bool gma_power_begin(struct drm_device *dev, bool force); void gma_power_end(struct drm_device *dev); -/* - * Use this function to do an instantaneous check for if the hw is on. - * Only use this in cases where you know the mutex is already held such - * as in irq install/uninstall and you need to - * prevent a deadlock situation. Otherwise use gma_power_begin(). 
- */ -bool gma_power_is_on(struct drm_device *dev); - -/* - * GFX-Runtime PM callbacks - */ -int psb_runtime_suspend(struct device *dev); -int psb_runtime_resume(struct device *dev); -int psb_runtime_idle(struct device *dev); - #endif /*_PSB_POWERMGMT_H_*/ diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c index 71534f4ca834..3c294c38bdb4 100644 --- a/drivers/gpu/drm/gma500/psb_device.c +++ b/drivers/gpu/drm/gma500/psb_device.c @@ -5,8 +5,6 @@ * **************************************************************************/ -#include <linux/backlight.h> - #include <drm/drm.h> #include "gma_device.h" @@ -24,8 +22,6 @@ static int psb_output_init(struct drm_device *dev) return 0; } -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - /* * Poulsbo Backlight Interfaces */ @@ -41,18 +37,6 @@ static int psb_output_init(struct drm_device *dev) #define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE) #define PSB_BACKLIGHT_PWM_CTL_SHIFT (16) -static int psb_brightness; -static struct backlight_device *psb_backlight_device; - -static int psb_get_brightness(struct backlight_device *bd) -{ - /* return locally cached var instead of HW read (due to DPST etc.) */ - /* FIXME: ideally return actual value in case firmware fiddled with - it */ - return psb_brightness; -} - - static int psb_backlight_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); @@ -86,62 +70,13 @@ static int psb_backlight_setup(struct drm_device *dev) REG_WRITE(BLC_PWM_CTL, (value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value)); } - return 0; -} - -static int psb_set_brightness(struct backlight_device *bd) -{ - struct drm_device *dev = bl_get_data(psb_backlight_device); - int level = bd->props.brightness; - - /* Percentage 1-100% being valid */ - if (level < 1) - level = 1; - - psb_intel_lvds_set_brightness(dev, level); - psb_brightness = level; - return 0; -} - -static const struct backlight_ops psb_ops = { - .get_brightness = psb_get_brightness, - .update_status = psb_set_brightness, -}; - -static int psb_backlight_init(struct drm_device *dev) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - int ret; - struct backlight_properties props; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.max_brightness = 100; - props.type = BACKLIGHT_PLATFORM; - - psb_backlight_device = backlight_device_register("psb-bl", - NULL, (void *)dev, &psb_ops, &props); - if (IS_ERR(psb_backlight_device)) - return PTR_ERR(psb_backlight_device); - - ret = psb_backlight_setup(dev); - if (ret < 0) { - backlight_device_unregister(psb_backlight_device); - psb_backlight_device = NULL; - return ret; - } - psb_backlight_device->props.brightness = 100; - psb_backlight_device->props.max_brightness = 100; - backlight_update_status(psb_backlight_device); - dev_priv->backlight_device = psb_backlight_device; + psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS); /* This must occur after the backlight is properly initialised */ psb_lid_timer_init(dev_priv); - return 0; } -#endif - /* * Provide the Poulsbo specific chip logic and low level methods * for power management @@ -345,9 +280,9 @@ const struct psb_ops psb_chip_ops = { .output_init = psb_output_init, -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - .backlight_init = psb_backlight_init, -#endif + .backlight_init = psb_backlight_setup, + .backlight_set = psb_intel_lvds_set_brightness, + .backlight_name = "psb-bl", .init_pm = psb_init_pm, .save_regs = psb_save_display_registers, diff --git a/drivers/gpu/drm/gma500/psb_drv.c 
b/drivers/gpu/drm/gma500/psb_drv.c index 1d8744f3e702..cd9c73f5a64a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -169,8 +169,7 @@ static void psb_driver_unload(struct drm_device *dev) /* TODO: Kill vblank etc here */ - if (dev_priv->backlight_device) - gma_backlight_exit(dev); + gma_backlight_exit(dev); psb_modeset_cleanup(dev); gma_irq_uninstall(dev); @@ -383,7 +382,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - gma_irq_install(dev, pdev->irq); + gma_irq_install(dev); dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ @@ -399,6 +398,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (gma_encoder->type == INTEL_OUTPUT_LVDS || gma_encoder->type == INTEL_OUTPUT_MIPI) { ret = gma_backlight_init(dev); + if (ret == 0) + acpi_video_register_backlight(); break; } } @@ -407,11 +408,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (ret) return ret; psb_intel_opregion_enable_asle(dev); -#if 0 - /* Enable runtime pm at last */ - pm_runtime_enable(dev->dev); - pm_runtime_set_active(dev->dev); -#endif return devm_add_action_or_reset(dev->dev, psb_device_release, dev); @@ -420,33 +416,6 @@ out_err: return ret; } -static inline void get_brightness(struct backlight_device *bd) -{ -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE - if (bd) { - bd->props.brightness = bd->ops->get_brightness(bd); - backlight_update_status(bd); - } -#endif -} - -static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg) -{ - struct drm_file *file_priv = filp->private_data; - struct drm_device *dev = file_priv->minor->dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - static unsigned int runtime_allowed; - - if (runtime_allowed == 1 && dev_priv->is_lvds_on) { - runtime_allowed++; - pm_runtime_allow(dev->dev); - dev_priv->rpm_enabled = 1; - } - return drm_ioctl(filp, cmd, arg); - /* FIXME: do we need to wrap the other side of this */ -} - static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct drm_psb_private *dev_priv; @@ -493,22 +462,13 @@ static void psb_pci_remove(struct pci_dev *pdev) drm_dev_unregister(dev); } -static const struct dev_pm_ops psb_pm_ops = { - .resume = gma_power_resume, - .suspend = gma_power_suspend, - .thaw = gma_power_thaw, - .freeze = gma_power_freeze, - .restore = gma_power_restore, - .runtime_suspend = psb_runtime_suspend, - .runtime_resume = psb_runtime_resume, - .runtime_idle = psb_runtime_idle, -}; +static DEFINE_RUNTIME_DEV_PM_OPS(psb_pm_ops, gma_power_suspend, gma_power_resume, NULL); static const struct file_operations psb_gem_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .unlocked_ioctl = psb_unlocked_ioctl, + .unlocked_ioctl = drm_ioctl, .compat_ioctl = drm_compat_ioctl, .mmap = drm_gem_mmap, .poll = drm_poll, diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 0ea3d23575f3..ae544b69fc47 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -172,6 +172,8 @@ #define PSB_WATCHDOG_DELAY (HZ * 2) #define PSB_LID_DELAY (HZ / 10) +#define PSB_MAX_BRIGHTNESS 100 + #define PSB_PWR_STATE_ON 1 #define PSB_PWR_STATE_OFF 2 @@ -426,9 +428,7 @@ struct drm_psb_private { spinlock_t irqmask_lock; /* Power */ - bool suspended; - bool display_power; - int display_count; + bool pm_initialized; /* 
Modesetting */ struct psb_intel_mode_device mode_dev; @@ -486,10 +486,8 @@ struct drm_psb_private { unsigned int core_freq; uint32_t iLVDS_enable; - /* Runtime PM state */ - int rpm_enabled; - /* MID specific */ + bool use_msi; bool has_gct; struct oaktrail_gct_data gct_data; @@ -499,10 +497,6 @@ struct drm_psb_private { /* Register state */ struct psb_save_area regs; - /* MSI reg save */ - uint32_t msi_addr; - uint32_t msi_data; - /* Hotplug handling */ struct work_struct hotplug_work; @@ -530,10 +524,6 @@ struct drm_psb_private { struct drm_fb_helper *fb_helper; - /* Panel brightness */ - int brightness; - int brightness_adjusted; - bool dsr_enable; u32 dsr_fb_update; bool dpi_panel_on[3]; @@ -602,10 +592,13 @@ struct psb_ops { void (*disable_sr)(struct drm_device *dev); void (*lvds_bl_power)(struct drm_device *dev, bool on); -#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE + /* Backlight */ int (*backlight_init)(struct drm_device *dev); -#endif + void (*backlight_set)(struct drm_device *dev, int level); + int (*backlight_get)(struct drm_device *dev); + const char *backlight_name; + int i2c_bus; /* I2C bus identifier for Moorestown */ }; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 8ccba116821b..8a1111fe714b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -197,8 +197,6 @@ extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level); extern void oaktrail_lvds_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev); -extern void oaktrail_dsi_init(struct drm_device *dev, - struct psb_intel_mode_device *mode_dev); struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev); extern void mid_dsi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int dsi_num); @@ -219,9 +217,6 @@ extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB); -extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector); -extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, - int enable); extern int intelfb_probe(struct drm_device *dev); extern int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index e6e6d61bbeab..d421031462df 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -228,7 +228,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg) vdc_stat &= dev_priv->vdc_irq_mask; spin_unlock(&dev_priv->irqmask_lock); - if (dsp_int && gma_power_is_on(dev)) { + if (dsp_int) { gma_vdc_interrupt(dev, vdc_stat); handled = 1; } @@ -264,13 +264,12 @@ void gma_irq_preinstall(struct drm_device *dev) spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); - if (gma_power_is_on(dev)) { - PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - PSB_WVDC32(0x00000000, PSB_INT_MASK_R); - PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R); - PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); - PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); - } + PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); + PSB_WVDC32(0x00000000, PSB_INT_MASK_R); + PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R); + PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); + PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); + if (dev->vblank[0].enabled) dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; if (dev->vblank[1].enabled) @@ -316,17 
+315,24 @@ void gma_irq_postinstall(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); } -int gma_irq_install(struct drm_device *dev, unsigned int irq) +int gma_irq_install(struct drm_device *dev) { + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - if (irq == IRQ_NOTCONNECTED) + if (dev_priv->use_msi && pci_enable_msi(pdev)) { + dev_warn(dev->dev, "Enabling MSI failed!\n"); + dev_priv->use_msi = false; + } + + if (pdev->irq == IRQ_NOTCONNECTED) return -ENOTCONN; gma_irq_preinstall(dev); /* PCI devices require shared interrupts. */ - ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); + ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev); if (ret) return ret; @@ -369,6 +375,8 @@ void gma_irq_uninstall(struct drm_device *dev) spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); free_irq(pdev->irq, dev); + if (dev_priv->use_msi) + pci_disable_msi(pdev); } int gma_crtc_enable_vblank(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h index b51e395194ff..7648f69824a5 100644 --- a/drivers/gpu/drm/gma500/psb_irq.h +++ b/drivers/gpu/drm/gma500/psb_irq.h @@ -17,7 +17,7 @@ struct drm_device; void gma_irq_preinstall(struct drm_device *dev); void gma_irq_postinstall(struct drm_device *dev); -int gma_irq_install(struct drm_device *dev, unsigned int irq); +int gma_irq_install(struct drm_device *dev); void gma_irq_uninstall(struct drm_device *dev); int gma_crtc_enable_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 7ae3b7d67fcf..3efce05d7b57 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -23,6 +23,8 @@ config DRM_I915 # but for select to work, need to select ACPI_VIDEO's dependencies, ick select BACKLIGHT_CLASS_DEVICE if ACPI select INPUT if ACPI + select X86_PLATFORM_DEVICES if ACPI + select ACPI_WMI if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI select SYNC_FILE diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 522ef9b4aff3..a26edcdadc21 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -123,6 +123,7 @@ gt-y += \ gt/intel_ring.o \ gt/intel_ring_submission.o \ gt/intel_rps.o \ + gt/intel_sa_media.o \ gt/intel_sseu.o \ gt/intel_sseu_debugfs.o \ gt/intel_timeline.o \ @@ -257,7 +258,8 @@ i915-y += \ display/intel_vga.o \ display/i9xx_plane.o \ display/skl_scaler.o \ - display/skl_universal_plane.o + display/skl_universal_plane.o \ + display/skl_watermark.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 82ad8fe7440c..e3e3d27ffb53 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -1169,7 +1169,7 @@ intel_dp_hotplug(struct intel_encoder *encoder, static bool ibx_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } @@ -1223,7 +1223,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder) static bool ilk_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 861dcd2eb890..a5be4af792cb 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -202,7 +202,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) * Should measure whether using a lower cdclk w/o IPS */ if (IS_BROADWELL(i915) && - crtc_state->pixel_rate > i915->max_cdclk_freq * 95 / 100) + crtc_state->pixel_rate > i915->display.cdclk.max_cdclk_freq * 95 / 100) return false; return true; diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 0f35f2facdfc..5afbe3e98ee8 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -125,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) - return dev_priv->fbc[INTEL_FBC_A]; + return dev_priv->display.fbc[INTEL_FBC_A]; else return NULL; } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 5dcfa7feffa9..ed4d93942dbd 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -33,6 +33,7 @@ #include "icl_dsi_regs.h" #include "intel_atomic.h" #include "intel_backlight.h" +#include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_connector.h" @@ -641,13 +642,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) @@ -657,13 +658,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static bool gen11_dsi_is_clock_enabled(struct intel_encoder *encoder) @@ -693,7 +694,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, enum phy phy; u32 val; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { @@ -709,7 +710,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static void @@ -1629,6 +1630,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; + ret = 
intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -2070,8 +2073,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; - intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; + + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; for_each_dsi_port(port, intel_dsi->ports) { struct intel_dsi_host *host; diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index e78430001f07..9df78e7caa2b 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -7,6 +7,7 @@ #include <linux/pci.h> #include <linux/acpi.h> +#include <acpi/video.h> #include "i915_drv.h" #include "intel_acpi.h" @@ -331,3 +332,29 @@ void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) */ fwnode_handle_put(fwnode); } + +void intel_acpi_video_register(struct drm_i915_private *i915) +{ + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + + acpi_video_register(); + + /* + * If i915 is driving an internal panel without registering its native + * backlight handler try to register the acpi_video backlight. + * For panels not driven by i915 another GPU driver may still register + * a native backlight later and acpi_video_register_backlight() should + * only be called after any native backlights have been registered. + */ + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_panel *panel = &to_intel_connector(connector)->panel; + + if (panel->backlight.funcs && !panel->backlight.device) { + acpi_video_register_backlight(); + break; + } + } + drm_connector_list_iter_end(&conn_iter); +} diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h index 4a760a2baed9..6a0007452f95 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.h +++ b/drivers/gpu/drm/i915/display/intel_acpi.h @@ -14,6 +14,7 @@ void intel_unregister_dsm_handler(void); void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915); void intel_acpi_device_id_update(struct drm_i915_private *i915); void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915); +void intel_acpi_video_register(struct drm_i915_private *i915); #else static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } @@ -23,6 +24,8 @@ static inline void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; } static inline void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; } +static inline +void intel_acpi_video_register(struct drm_i915_private *i915) { return; } #endif /* CONFIG_ACPI */ #endif /* __INTEL_ACPI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index b94973b5633f..18f0a5ae3bac 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -62,9 +62,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, struct intel_digital_connector_state *intel_conn_state = 
to_intel_digital_connector_state(state); - if (property == dev_priv->force_audio_property) + if (property == dev_priv->display.properties.force_audio) *val = intel_conn_state->force_audio; - else if (property == dev_priv->broadcast_rgb_property) + else if (property == dev_priv->display.properties.broadcast_rgb) *val = intel_conn_state->broadcast_rgb; else { drm_dbg_atomic(&dev_priv->drm, @@ -95,12 +95,12 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(state); - if (property == dev_priv->force_audio_property) { + if (property == dev_priv->display.properties.force_audio) { intel_conn_state->force_audio = val; return 0; } - if (property == dev_priv->broadcast_rgb_property) { + if (property == dev_priv->display.properties.broadcast_rgb) { intel_conn_state->broadcast_rgb = val; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index dd876dbbaa39..aaa6708256d5 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -42,9 +42,9 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" -#include "intel_pm.h" #include "intel_sprite.h" #include "skl_scaler.h" +#include "skl_watermark.h" static void intel_plane_state_reset(struct intel_plane_state *plane_state, struct intel_plane *plane) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 6c9ee905f132..aacbc6da84ef 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -393,7 +393,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; @@ -441,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; @@ -496,7 +496,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; u32 tmp; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* Disable timestamps */ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); @@ -514,7 +514,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, @@ -532,7 +532,7 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, h_total = crtc_state->hw.adjusted_mode.crtc_htotal; pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; vdsc_bpp = crtc_state->dsc.compressed_bpp; - cdclk = 
i915->cdclk.hw.cdclk; + cdclk = i915->display.cdclk.hw.cdclk; /* fec= 0.972261, using rounding multiplier of 1000000 */ fec_coeff = 972261; link_clk = crtc_state->port_clock; @@ -639,7 +639,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, u32 tmp; int len, i; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) @@ -677,7 +677,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder, @@ -814,7 +814,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = @@ -838,17 +838,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; - if (dev_priv->audio.funcs) - dev_priv->audio.funcs->audio_codec_enable(encoder, - crtc_state, - conn_state); + if (dev_priv->display.funcs.audio) + dev_priv->display.funcs.audio->audio_codec_enable(encoder, + crtc_state, + conn_state); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); encoder->audio_connector = connector; /* referred in audio callbacks */ - dev_priv->audio.encoder_map[pipe] = encoder; - mutex_unlock(&dev_priv->audio.mutex); + dev_priv->display.audio.encoder_map[pipe] = encoder; + mutex_unlock(&dev_priv->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -878,7 +878,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_connector *connector = old_conn_state->connector; enum port port = encoder->port; @@ -891,15 +891,15 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, connector->base.id, connector->name, encoder->base.base.id, encoder->base.name, pipe_name(pipe)); - if (dev_priv->audio.funcs) - dev_priv->audio.funcs->audio_codec_disable(encoder, - old_crtc_state, - old_conn_state); + if (dev_priv->display.funcs.audio) + dev_priv->display.funcs.audio->audio_codec_disable(encoder, + old_crtc_state, + old_conn_state); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); encoder->audio_connector = NULL; - dev_priv->audio.encoder_map[pipe] = NULL; - mutex_unlock(&dev_priv->audio.mutex); + dev_priv->display.audio.encoder_map[pipe] = NULL; + mutex_unlock(&dev_priv->display.audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -935,13 +935,13 @@ static const struct intel_audio_funcs hsw_audio_funcs = { void intel_audio_hooks_init(struct drm_i915_private *dev_priv) { 
if (IS_G4X(dev_priv)) { - dev_priv->audio.funcs = &g4x_audio_funcs; + dev_priv->display.funcs.audio = &g4x_audio_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->audio.funcs = &ilk_audio_funcs; + dev_priv->display.funcs.audio = &ilk_audio_funcs; } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { - dev_priv->audio.funcs = &hsw_audio_funcs; + dev_priv->display.funcs.audio = &hsw_audio_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->audio.funcs = &ilk_audio_funcs; + dev_priv->display.funcs.audio = &ilk_audio_funcs; } } @@ -971,7 +971,7 @@ void intel_audio_cdclk_change_post(struct drm_i915_private *i915) struct aud_ts_cdclk_m_n aud_ts; if (DISPLAY_VER(i915) >= 13) { - get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); + get_aud_ts_cdclk_m_n(i915->display.cdclk.hw.ref, i915->display.cdclk.hw.cdclk, &aud_ts); intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); @@ -1046,13 +1046,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK); - if (dev_priv->audio.power_refcount++ == 0) { + if (dev_priv->display.audio.power_refcount++ == 0) { if (DISPLAY_VER(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, - dev_priv->audio.freq_cntrl); + dev_priv->display.audio.freq_cntrl); drm_dbg_kms(&dev_priv->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", - dev_priv->audio.freq_cntrl); + dev_priv->display.audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -1073,7 +1073,7 @@ static void i915_audio_component_put_power(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ - if (--dev_priv->audio.power_refcount == 0) + if (--dev_priv->display.audio.power_refcount == 0) if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); @@ -1119,7 +1119,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv))) return -ENODEV; - return dev_priv->cdclk.hw.cdclk; + return dev_priv->display.cdclk.hw.cdclk; } /* @@ -1140,10 +1140,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { if (drm_WARN_ON(&dev_priv->drm, - pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) + pipe >= ARRAY_SIZE(dev_priv->display.audio.encoder_map))) return NULL; - encoder = dev_priv->audio.encoder_map[pipe]; + encoder = dev_priv->display.audio.encoder_map[pipe]; /* * when bootup, audio driver may not know it is * MST or not. 
So it will poll all the port & pipe @@ -1159,7 +1159,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, return NULL; for_each_pipe(dev_priv, pipe) { - encoder = dev_priv->audio.encoder_map[pipe]; + encoder = dev_priv->display.audio.encoder_map[pipe]; if (encoder == NULL) continue; @@ -1177,7 +1177,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int pipe, int rate) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - struct i915_audio_component *acomp = dev_priv->audio.component; + struct i915_audio_component *acomp = dev_priv->display.audio.component; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; @@ -1187,7 +1187,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, return 0; cookie = i915_audio_component_get_power(kdev); - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); /* 1. get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); @@ -1206,7 +1206,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, hsw_audio_config_update(encoder, crtc->config); unlock: - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } @@ -1220,13 +1220,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, const u8 *eld; int ret = -EINVAL; - mutex_lock(&dev_priv->audio.mutex); + mutex_lock(&dev_priv->display.audio.mutex); intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", port_name(port)); - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); return ret; } @@ -1238,7 +1238,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, memcpy(buf, eld, min(max_bytes, ret)); } - mutex_unlock(&dev_priv->audio.mutex); + mutex_unlock(&dev_priv->display.audio.mutex); return ret; } @@ -1273,7 +1273,7 @@ static int i915_audio_component_bind(struct device *i915_kdev, BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; - dev_priv->audio.component = acomp; + dev_priv->display.audio.component = acomp; drm_modeset_unlock_all(&dev_priv->drm); return 0; @@ -1288,14 +1288,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; - dev_priv->audio.component = NULL; + dev_priv->display.audio.component = NULL; drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); - if (dev_priv->audio.power_refcount) + if (dev_priv->display.audio.power_refcount) drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", - dev_priv->audio.power_refcount); + dev_priv->display.audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { @@ -1359,13 +1359,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); - dev_priv->audio.freq_cntrl = aud_freq; + dev_priv->display.audio.freq_cntrl = aud_freq; } /* init with current cdclk */ intel_audio_cdclk_change_post(dev_priv); - dev_priv->audio.component_registered = true; + dev_priv->display.audio.component_registered = true; } /** @@ -1377,11 +1377,11 @@ static void i915_audio_component_init(struct 
drm_i915_private *dev_priv) */ static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) { - if (!dev_priv->audio.component_registered) + if (!dev_priv->display.audio.component_registered) return; component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); - dev_priv->audio.component_registered = false; + dev_priv->display.audio.component_registered = false; } /** @@ -1403,7 +1403,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv) */ void intel_audio_deinit(struct drm_i915_private *dev_priv) { - if ((dev_priv)->audio.lpe.platdev != NULL) + if (dev_priv->display.audio.lpe.platdev != NULL) intel_lpe_audio_teardown(dev_priv); else i915_audio_component_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 110fc98ec280..beba39a38c87 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -8,7 +8,10 @@ #include <linux/pwm.h> #include <linux/string_helpers.h> +#include <acpi/video.h> + #include "intel_backlight.h" +#include "intel_backlight_regs.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" @@ -16,6 +19,8 @@ #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" #include "intel_pci_config.h" +#include "intel_pps.h" +#include "intel_quirks.h" /** * scale - scale values from one range to another @@ -86,7 +91,7 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) return val; if (dev_priv->params.invert_brightness > 0 || - dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { + intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) { return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; } @@ -126,7 +131,7 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); if (dev_priv->params.invert_brightness > 0 || - (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)) + (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS))) val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, @@ -303,7 +308,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, if (!panel->backlight.present || !conn_state->crtc) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); @@ -319,7 +324,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) @@ -463,14 +468,14 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state) return; } - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_POWERDOWN; panel->backlight.enabled = false; panel->backlight.funcs->disable(old_conn_state, 0); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static void lpt_enable_backlight(const struct intel_crtc_state 
*crtc_state, @@ -813,11 +818,11 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state, drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe)); - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); __intel_backlight_enable(crtc_state, conn_state); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) @@ -827,12 +832,12 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) struct intel_panel *panel = &connector->panel; u32 val = 0; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (panel->backlight.enabled) val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector)); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val); return val; @@ -860,7 +865,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta if (!panel->backlight.present) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); @@ -870,7 +875,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta if (panel->backlight.enabled) intel_panel_actually_set_backlight(conn_state, hw_level); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } static int intel_backlight_device_update_status(struct backlight_device *bd) @@ -950,6 +955,11 @@ int intel_backlight_device_register(struct intel_connector *connector) WARN_ON(panel->backlight.max == 0); + if (!acpi_video_backlight_use_native()) { + drm_info(&i915->drm, "Skipping intel_backlight registration\n"); + return 0; + } + memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; @@ -971,26 +981,24 @@ int intel_backlight_device_register(struct intel_connector *connector) if (!name) return -ENOMEM; - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); - - /* - * Using the same name independent of the drm device or connector - * prevents registration of multiple backlight devices in the - * driver. However, we need to use the default name for backward - * compatibility. Use unique names for subsequent backlight devices as a - * fallback when the default name already exists. - */ - if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) { + bd = backlight_device_get_by_name(name); + if (bd) { + put_device(&bd->dev); + /* + * Using the same name independent of the drm device or connector + * prevents registration of multiple backlight devices in the + * driver. However, we need to use the default name for backward + * compatibility. Use unique names for subsequent backlight devices as a + * fallback when the default name already exists. 
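
For illustration, the reordered registration above boils down to "probe the default name first, then fall back to a unique per-card name". A minimal sketch of that pattern, assuming the usual <linux/backlight.h> helpers; the my_register_backlight() wrapper and its parameters are hypothetical, while backlight_device_get_by_name() and backlight_device_register() are the stock kernel APIs:

static struct backlight_device *
my_register_backlight(struct drm_connector *connector,
		      const struct backlight_ops *ops,
		      const struct backlight_properties *props)
{
	struct backlight_device *bd;
	char *name;

	name = kstrdup("intel_backlight", GFP_KERNEL);
	if (!name)
		return ERR_PTR(-ENOMEM);

	/* Probe for a name collision up front instead of handling -EEXIST. */
	bd = backlight_device_get_by_name(name);
	if (bd) {
		put_device(&bd->dev);
		kfree(name);
		name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
				 connector->dev->primary->index,
				 connector->name);
		if (!name)
			return ERR_PTR(-ENOMEM);
	}

	bd = backlight_device_register(name, connector->kdev, connector,
				       ops, props);
	kfree(name);
	return bd;
}
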
+ */ kfree(name); name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", i915->drm.primary->index, connector->base.name); if (!name) return -ENOMEM; - - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); } + bd = backlight_device_register(name, connector->base.kdev, connector, + &intel_backlight_device_ops, &props); if (IS_ERR(bd)) { drm_err(&i915->drm, @@ -1113,7 +1121,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) if (IS_PINEVIEW(dev_priv)) clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else - clock = KHz(dev_priv->cdclk.hw.cdclk); + clock = KHz(dev_priv->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); } @@ -1131,7 +1139,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) if (IS_G4X(dev_priv)) clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else - clock = KHz(dev_priv->cdclk.hw.cdclk); + clock = KHz(dev_priv->display.cdclk.hw.cdclk); return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); } @@ -1591,11 +1599,11 @@ void intel_backlight_update(struct intel_atomic_state *state, if (!panel->backlight.present) return; - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); if (!panel->backlight.enabled) __intel_backlight_enable(crtc_state, conn_state); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); } int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) @@ -1605,7 +1613,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) int ret; if (!connector->panel.vbt.backlight.present) { - if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { + if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) { drm_dbg_kms(&dev_priv->drm, "no backlight present per VBT, but present per quirk\n"); } else { @@ -1620,9 +1628,9 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) return -ENODEV; /* set level and max in panel struct */ - mutex_lock(&dev_priv->backlight_lock); + mutex_lock(&dev_priv->display.backlight.lock); ret = panel->backlight.funcs->setup(connector, pipe); - mutex_unlock(&dev_priv->backlight_lock); + mutex_unlock(&dev_priv->display.backlight.lock); if (ret) { drm_dbg_kms(&dev_priv->drm, @@ -1773,9 +1781,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel) panel->backlight.pwm_funcs = &i9xx_pwm_funcs; } - if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && - intel_dp_aux_init_backlight_funcs(connector) == 0) - return; + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { + if (intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) + connector->panel.backlight.power = intel_pps_backlight_power; + } /* We're using a standard PWM backlight interface */ panel->backlight.funcs = &pwm_bl_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h new file mode 100644 index 000000000000..50c1210f6d5d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_BACKLIGHT_REGS_H__ +#define __INTEL_BACKLIGHT_REGS_H__ + +#include "i915_reg_defs.h" + +#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250) +#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350) +#define 
VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ + _VLV_BLC_PWM_CTL2_B) + +#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254) +#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354) +#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ + _VLV_BLC_PWM_CTL_B) + +#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260) +#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360) +#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ + _VLV_BLC_HIST_CTL_B) + +/* Backlight control */ +#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */ +#define BLM_PWM_ENABLE (1 << 31) +#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ +#define BLM_PIPE_SELECT (1 << 29) +#define BLM_PIPE_SELECT_IVB (3 << 29) +#define BLM_PIPE_A (0 << 29) +#define BLM_PIPE_B (1 << 29) +#define BLM_PIPE_C (2 << 29) /* ivb + */ +#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ +#define BLM_TRANSCODER_B BLM_PIPE_B +#define BLM_TRANSCODER_C BLM_PIPE_C +#define BLM_TRANSCODER_EDP (3 << 29) +#define BLM_PIPE(pipe) ((pipe) << 29) +#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ +#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) +#define BLM_PHASE_IN_ENABLE (1 << 25) +#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) +#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) +#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) +#define BLM_PHASE_IN_COUNT_SHIFT (8) +#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) +#define BLM_PHASE_IN_INCR_SHIFT (0) +#define BLM_PHASE_IN_INCR_MASK (0xff << 0) +#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254) +/* + * This is the most significant 15 bits of the number of backlight cycles in a + * complete cycle of the modulated backlight control. + * + * The actual value is this field multiplied by two. + */ +#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) +#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) +#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ +/* + * This is the number of cycles out of the backlight modulation cycle for which + * the backlight is on. + * + * This field must be no greater than the number of cycles in the complete + * backlight modulation cycle. + */ +#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) +#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) +#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) +#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ + +#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260) +#define BLM_HISTOGRAM_ENABLE (1 << 31) + +/* New registers for PCH-split platforms. Safe where new bits show up, the + * register layout matches with gen4 BLC_PWM_CTL[12]. */ +#define BLC_PWM_CPU_CTL2 _MMIO(0x48250) +#define BLC_PWM_CPU_CTL _MMIO(0x48254) + +#define HSW_BLC_PWM2_CTL _MMIO(0x48350) + +/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is + * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ +#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250) +#define BLM_PCH_PWM_ENABLE (1 << 31) +#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) +#define BLM_PCH_POLARITY (1 << 29) +#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) + +/* BXT backlight register definition.
*/ +#define _BXT_BLC_PWM_CTL1 0xC8250 +#define BXT_BLC_PWM_ENABLE (1 << 31) +#define BXT_BLC_PWM_POLARITY (1 << 29) +#define _BXT_BLC_PWM_FREQ1 0xC8254 +#define _BXT_BLC_PWM_DUTY1 0xC8258 + +#define _BXT_BLC_PWM_CTL2 0xC8350 +#define _BXT_BLC_PWM_FREQ2 0xC8354 +#define _BXT_BLC_PWM_DUTY2 0xC8358 + +#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) +#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) +#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \ + _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) + +/* Utility pin */ +#define UTIL_PIN_CTL _MMIO(0x48400) +#define UTIL_PIN_ENABLE (1 << 31) +#define UTIL_PIN_PIPE_MASK (3 << 29) +#define UTIL_PIN_PIPE(x) ((x) << 29) +#define UTIL_PIN_MODE_MASK (0xf << 24) +#define UTIL_PIN_MODE_DATA (0 << 24) +#define UTIL_PIN_MODE_PWM (1 << 24) +#define UTIL_PIN_MODE_VBLANK (4 << 24) +#define UTIL_PIN_MODE_VSYNC (5 << 24) +#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24) +#define UTIL_PIN_OUTPUT_DATA (1 << 23) +#define UTIL_PIN_POLARITY (1 << 22) +#define UTIL_PIN_DIRECTION_INPUT (1 << 19) +#define UTIL_PIN_INPUT_DATA (1 << 16) + +#endif /* __INTEL_BACKLIGHT_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 51dde5bfd956..28bdb936cd1f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -135,18 +135,6 @@ static u32 raw_block_offset(const void *bdb, enum bdb_block_id section_id) return block - bdb; } -/* size of the block excluding the header */ -static u32 raw_block_size(const void *bdb, enum bdb_block_id section_id) -{ - const void *block; - - block = find_raw_section(bdb, section_id); - if (!block) - return 0; - - return get_blocksize(block); -} - struct bdb_block_entry { struct list_head node; enum bdb_block_id section_id; @@ -159,7 +147,7 @@ find_section(struct drm_i915_private *i915, { struct bdb_block_entry *entry; - list_for_each_entry(entry, &i915->vbt.bdb_blocks, node) { + list_for_each_entry(entry, &i915->display.vbt.bdb_blocks, node) { if (entry->section_id == section_id) return entry->data + 3; } @@ -231,9 +219,14 @@ static bool validate_lfp_data_ptrs(const void *bdb, { int fp_timing_size, dvo_timing_size, panel_pnp_id_size, panel_name_size; int data_block_size, lfp_data_size; + const void *data_block; int i; - data_block_size = raw_block_size(bdb, BDB_LVDS_LFP_DATA); + data_block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); + if (!data_block) + return false; + + data_block_size = get_blocksize(data_block); if (data_block_size == 0) return false; @@ -261,21 +254,6 @@ static bool validate_lfp_data_ptrs(const void *bdb, if (16 * lfp_data_size > data_block_size) return false; - /* - * Except for vlv/chv machines all real VBTs seem to have 6 - * unaccounted bytes in the fp_timing table. And it doesn't - * appear to be a really intentional hole as the fp_timing - * 0xffff terminator is always within those 6 missing bytes. 
- */ - if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size && - fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size != lfp_data_size) - return false; - - if (ptrs->ptr[0].fp_timing.offset + fp_timing_size > ptrs->ptr[0].dvo_timing.offset || - ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset || - ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size) - return false; - /* make sure the table entries have uniform size */ for (i = 1; i < 16; i++) { if (ptrs->ptr[i].fp_timing.table_size != fp_timing_size || @@ -289,6 +267,23 @@ static bool validate_lfp_data_ptrs(const void *bdb, return false; } + /* + * Except for vlv/chv machines all real VBTs seem to have 6 + * unaccounted bytes in the fp_timing table. And it doesn't + * appear to be a really intentional hole as the fp_timing + * 0xffff terminator is always within those 6 missing bytes. + */ + if (fp_timing_size + 6 + dvo_timing_size + panel_pnp_id_size == lfp_data_size) + fp_timing_size += 6; + + if (fp_timing_size + dvo_timing_size + panel_pnp_id_size != lfp_data_size) + return false; + + if (ptrs->ptr[0].fp_timing.offset + fp_timing_size != ptrs->ptr[0].dvo_timing.offset || + ptrs->ptr[0].dvo_timing.offset + dvo_timing_size != ptrs->ptr[0].panel_pnp_id.offset || + ptrs->ptr[0].panel_pnp_id.offset + panel_pnp_id_size != lfp_data_size) + return false; + /* make sure the tables fit inside the data block */ for (i = 0; i < 16; i++) { if (ptrs->ptr[i].fp_timing.offset + fp_timing_size > data_block_size || @@ -300,6 +295,15 @@ static bool validate_lfp_data_ptrs(const void *bdb, if (ptrs->panel_name.offset + 16 * panel_name_size > data_block_size) return false; + /* make sure fp_timing terminators are present at expected locations */ + for (i = 0; i < 16; i++) { + const u16 *t = data_block + ptrs->ptr[i].fp_timing.offset + + fp_timing_size - 2; + + if (*t != 0xffff) + return false; + } + return true; } @@ -333,18 +337,6 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block) return validate_lfp_data_ptrs(bdb, ptrs); } -static const void *find_fp_timing_terminator(const u8 *data, int size) -{ - int i; - - for (i = 0; i < size - 1; i++) { - if (data[i] == 0xff && data[i+1] == 0xff) - return &data[i]; - } - - return NULL; -} - static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table, int table_size, int total_size) { @@ -368,11 +360,22 @@ static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next, static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, const void *bdb) { - int i, size, table_size, block_size, offset; - const void *t0, *t1, *block; + int i, size, table_size, block_size, offset, fp_timing_size; struct bdb_lvds_lfp_data_ptrs *ptrs; + const void *block; void *ptrs_block; + /* + * The hardcoded fp_timing_size is only valid for + * modernish VBTs. All older VBTs definitely should + * include block 41 and thus we don't need to + * generate one. 
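
In other words, once fp_timing is pinned at 38 bytes the stride of each of the 16 per-panel LFP data entries becomes a fixed expression instead of something recovered by scanning for 0xffff terminators. A short sketch under that assumption (MY_FP_TIMING_SIZE is an illustrative stand-in for the hardcoded value; the struct names are the ones used in this hunk):

/* Fixed per-panel entry stride, valid for VBT version >= 155. */
#define MY_FP_TIMING_SIZE 38

static int my_lfp_entry_size(void)
{
	return MY_FP_TIMING_SIZE +
	       sizeof(struct lvds_dvo_timing) +
	       sizeof(struct lvds_pnp_id);
}

/* Entry i then starts at base_offset + i * my_lfp_entry_size(), i = 0..15. */
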
+ */ +	if (i915->display.vbt.version < 155) + return NULL; + + fp_timing_size = 38; + block = find_raw_section(bdb, BDB_LVDS_LFP_DATA); if (!block) return NULL; @@ -381,17 +384,8 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, block_size = get_blocksize(block); - size = block_size; - t0 = find_fp_timing_terminator(block, size); - if (!t0) - return NULL; - - size -= t0 - block - 2; - t1 = find_fp_timing_terminator(t0 + 2, size); - if (!t1) - return NULL; - - size = t1 - t0; + size = fp_timing_size + sizeof(struct lvds_dvo_timing) + + sizeof(struct lvds_pnp_id); if (size * 16 > block_size) return NULL; @@ -409,7 +403,7 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, table_size = sizeof(struct lvds_dvo_timing); size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size); - table_size = t0 - block + 2; + table_size = fp_timing_size; size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size); if (ptrs->ptr[0].fp_timing.table_size) @@ -424,14 +418,14 @@ static void *generate_lfp_data_ptrs(struct drm_i915_private *i915, return NULL; } - size = t1 - t0; + size = fp_timing_size + sizeof(struct lvds_dvo_timing) + + sizeof(struct lvds_pnp_id); for (i = 1; i < 16; i++) { next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size); next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size); } - size = t1 - t0; table_size = sizeof(struct lvds_lfp_panel_name); if (16 * (size + table_size) <= block_size) { @@ -479,6 +473,13 @@ init_bdb_block(struct drm_i915_private *i915, block_size = get_blocksize(block); + /* + * Version number and new block size are considered + * part of the header for MIPI sequence block v3+.
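
Concretely, the layout that motivates the "block_size += 5" just below can be sketched as a 1-byte version plus a 4-byte little-endian size preceding the sequence data; the struct and helper names here are illustrative, while get_unaligned_le32() is the stock kernel accessor:

struct my_mipi_seq_v3_header {
	u8 version;	/* >= 3 selects this layout */
	u8 new_size[4];	/* le32 size of the payload that follows */
} __packed;		/* sizeof == 5, hence the "block_size += 5" below */

static u32 my_mipi_seq_v3_total_size(const void *block)
{
	const struct my_mipi_seq_v3_header *hdr = block;

	return sizeof(*hdr) + get_unaligned_le32(hdr->new_size);
}
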
+ */ + if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3) + block_size += 5; + entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3), GFP_KERNEL); if (!entry) { @@ -501,7 +502,7 @@ init_bdb_block(struct drm_i915_private *i915, return; } - list_add_tail(&entry->node, &i915->vbt.bdb_blocks); + list_add_tail(&entry->node, &i915->display.vbt.bdb_blocks); } static void init_bdb_blocks(struct drm_i915_private *i915, @@ -604,6 +605,19 @@ get_lfp_data_tail(const struct bdb_lvds_lfp_data *data, return NULL; } +static void dump_pnp_id(struct drm_i915_private *i915, + const struct lvds_pnp_id *pnp_id, + const char *name) +{ + u16 mfg_name = be16_to_cpu((__force __be16)pnp_id->mfg_name); + char vend[4]; + + drm_dbg_kms(&i915->drm, "%s PNPID mfg: %s (0x%x), prod: %u, serial: %u, week: %d, year: %d\n", + name, drm_edid_decode_mfg_id(mfg_name, vend), + pnp_id->mfg_name, pnp_id->product_code, pnp_id->serial, + pnp_id->mfg_week, pnp_id->mfg_year + 1990); +} + static int opregion_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, const struct edid *edid) @@ -655,6 +669,8 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, edid_id_nodate.mfg_week = 0; edid_id_nodate.mfg_year = 0; + dump_pnp_id(i915, edid_id, "EDID"); + ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); if (!ptrs) return -1; @@ -861,6 +877,7 @@ parse_lfp_data(struct drm_i915_private *i915, const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_tail *tail; const struct bdb_lvds_lfp_data_ptrs *ptrs; + const struct lvds_pnp_id *pnp_id; int panel_type = panel->vbt.panel_type; ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); @@ -874,11 +891,18 @@ parse_lfp_data(struct drm_i915_private *i915, if (!panel->vbt.lfp_lvds_vbt_mode) parse_lfp_panel_dtd(i915, panel, data, ptrs); + pnp_id = get_lvds_pnp_id(data, ptrs, panel_type); + dump_pnp_id(i915, pnp_id, "Panel"); + tail = get_lfp_data_tail(data, ptrs); if (!tail) return; - if (i915->vbt.version >= 188) { + drm_dbg_kms(&i915->drm, "Panel name: %.*s\n", + (int)sizeof(tail->panel_name[0].name), + tail->panel_name[panel_type].name); + + if (i915->display.vbt.version >= 188) { panel->vbt.seamless_drrs_min_refresh_rate = tail->seamless_drrs_min_refresh_rate[panel_type]; drm_dbg_kms(&i915->drm, @@ -904,7 +928,7 @@ parse_generic_dtd(struct drm_i915_private *i915, * first on VBT >= 229, but still fall back to trying the old LFP * block if that fails. 
*/ - if (i915->vbt.version < 229) + if (i915->display.vbt.version < 229) return; generic_dtd = find_section(i915, BDB_GENERIC_DTD); @@ -1008,12 +1032,12 @@ parse_lfp_backlight(struct drm_i915_private *i915, } panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; - if (i915->vbt.version >= 191) { + if (i915->display.vbt.version >= 191) { size_t exp_size; - if (i915->vbt.version >= 236) + if (i915->display.vbt.version >= 236) exp_size = sizeof(struct bdb_lfp_backlight_data); - else if (i915->vbt.version >= 234) + else if (i915->display.vbt.version >= 234) exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; else exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; @@ -1030,14 +1054,14 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; panel->vbt.backlight.active_low_pwm = entry->active_low_pwm; - if (i915->vbt.version >= 234) { + if (i915->display.vbt.version >= 234) { u16 min_level; bool scale; level = backlight_data->brightness_level[panel_type].level; min_level = backlight_data->brightness_min_level[panel_type].level; - if (i915->vbt.version >= 236) + if (i915->display.vbt.version >= 236) scale = backlight_data->brightness_precision_bits[panel_type] == 16; else scale = level > 255; @@ -1134,37 +1158,37 @@ parse_general_features(struct drm_i915_private *i915) if (!general) return; - i915->vbt.int_tv_support = general->int_tv_support; + i915->display.vbt.int_tv_support = general->int_tv_support; /* int_crt_support can't be trusted on earlier platforms */ - if (i915->vbt.version >= 155 && + if (i915->display.vbt.version >= 155 && (HAS_DDI(i915) || IS_VALLEYVIEW(i915))) - i915->vbt.int_crt_support = general->int_crt_support; - i915->vbt.lvds_use_ssc = general->enable_ssc; - i915->vbt.lvds_ssc_freq = + i915->display.vbt.int_crt_support = general->int_crt_support; + i915->display.vbt.lvds_use_ssc = general->enable_ssc; + i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, general->ssc_freq); - i915->vbt.display_clock_mode = general->display_clock_mode; - i915->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; - if (i915->vbt.version >= 181) { - i915->vbt.orientation = general->rotate_180 ? + i915->display.vbt.display_clock_mode = general->display_clock_mode; + i915->display.vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; + if (i915->display.vbt.version >= 181) { + i915->display.vbt.orientation = general->rotate_180 ? DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : DRM_MODE_PANEL_ORIENTATION_NORMAL; } else { - i915->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; + i915->display.vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } - if (i915->vbt.version >= 249 && general->afc_startup_config) { - i915->vbt.override_afc_startup = true; - i915->vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 0x0 : 0x7; + if (i915->display.vbt.version >= 249 && general->afc_startup_config) { + i915->display.vbt.override_afc_startup = true; + i915->display.vbt.override_afc_startup_val = general->afc_startup_config == 0x1 ? 
0x0 : 0x7; } drm_dbg_kms(&i915->drm, "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", - i915->vbt.int_tv_support, - i915->vbt.int_crt_support, - i915->vbt.lvds_use_ssc, - i915->vbt.lvds_ssc_freq, - i915->vbt.display_clock_mode, - i915->vbt.fdi_rx_polarity_inverted); + i915->display.vbt.int_tv_support, + i915->display.vbt.int_crt_support, + i915->display.vbt.lvds_use_ssc, + i915->display.vbt.lvds_ssc_freq, + i915->display.vbt.display_clock_mode, + i915->display.vbt.fdi_rx_polarity_inverted); } static const struct child_device_config * @@ -1190,7 +1214,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) return; } - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (child->slave_addr != SLAVE_ADDR1 && @@ -1214,7 +1238,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) child->slave_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? "SDVOB" : "SDVOC"); - mapping = &i915->vbt.sdvo_mappings[child->dvo_port - 1]; + mapping = &i915->display.vbt.sdvo_mappings[child->dvo_port - 1]; if (!mapping->initialized) { mapping->dvo_port = child->dvo_port; mapping->slave_addr = child->slave_addr; @@ -1265,7 +1289,7 @@ parse_driver_features(struct drm_i915_private *i915) * interpretation, but real world VBTs seem to. */ if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS) - i915->vbt.int_lvds_support = 0; + i915->display.vbt.int_lvds_support = 0; } else { /* * FIXME it's not clear which BDB version has the LVDS config @@ -1278,10 +1302,10 @@ parse_driver_features(struct drm_i915_private *i915) * in the wild with the bits correctly populated. Version * 108 (on i85x) does not have the bits correctly populated. 
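
Most of the per-panel VBT fields consulted in this file (hobl, vrr_feature_enabled, sdrrs_msa_timing_delay, ...) are packed several-to-a-dword and indexed by panel_type. The driver's panel_bits()/panel_bool() helpers, relied on in the hunks just below, behave like this sketch (names prefixed my_ to mark it as an illustration):

static inline u32 my_panel_bits(u32 value, int panel_type, int num_bits)
{
	/* Each panel's field occupies num_bits consecutive bits. */
	return (value >> (panel_type * num_bits)) & (BIT(num_bits) - 1);
}

static inline bool my_panel_bool(u32 value, int panel_type)
{
	return my_panel_bits(value, panel_type, 1);
}

So panel_bool(power->hobl, panel_type), for example, reads the selected panel's single HOBL bit out of the shared dword.
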
*/ - if (i915->vbt.version >= 134 && + if (i915->display.vbt.version >= 134 && driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS && driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS) - i915->vbt.int_lvds_support = 0; + i915->display.vbt.int_lvds_support = 0; } } @@ -1295,7 +1319,7 @@ parse_panel_driver_features(struct drm_i915_private *i915, if (!driver) return; - if (i915->vbt.version < 228) { + if (i915->display.vbt.version < 228) { drm_dbg_kms(&i915->drm, "DRRS State Enabled:%d\n", driver->drrs_enabled); /* @@ -1328,7 +1352,7 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.vrr = true; /* matches Windows behaviour */ - if (i915->vbt.version < 228) + if (i915->display.vbt.version < 228) return; power = find_section(i915, BDB_LFP_POWER); @@ -1354,10 +1378,10 @@ parse_power_conservation_features(struct drm_i915_private *i915, panel->vbt.drrs_type = DRRS_TYPE_NONE; } - if (i915->vbt.version >= 232) + if (i915->display.vbt.version >= 232) panel->vbt.edp.hobl = panel_bool(power->hobl, panel_type); - if (i915->vbt.version >= 233) + if (i915->display.vbt.version >= 233) panel->vbt.vrr = panel_bool(power->vrr_feature_enabled, panel_type); } @@ -1393,7 +1417,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.pps = *edp_pps; - if (i915->vbt.version >= 224) { + if (i915->display.vbt.version >= 224) { panel->vbt.edp.rate = edp->edp_fast_link_training_rate[panel_type] * 20; } else { @@ -1472,7 +1496,7 @@ parse_edp(struct drm_i915_private *i915, break; } - if (i915->vbt.version >= 173) { + if (i915->display.vbt.version >= 173) { u8 vswing; /* Don't read from VBT if module parameter has valid value*/ @@ -1488,7 +1512,7 @@ parse_edp(struct drm_i915_private *i915, panel->vbt.edp.drrs_msa_timing_delay = panel_bits(edp->sdrrs_msa_timing_delay, panel_type, 2); - if (i915->vbt.version >= 244) + if (i915->display.vbt.version >= 244) panel->vbt.edp.max_link_rate = edp->edp_max_port_link_rate[panel_type] * 20; } @@ -1520,7 +1544,7 @@ parse_psr(struct drm_i915_private *i915, * New psr options 0=500us, 1=100us, 2=2500us, 3=0us * Old decimal value is wake up time in multiples of 100 us. */ - if (i915->vbt.version >= 205 && + if (i915->display.vbt.version >= 205 && (DISPLAY_VER(i915) >= 9 && !IS_BROXTON(i915))) { switch (psr_table->tp1_wakeup_time) { case 0: @@ -1566,7 +1590,7 @@ parse_psr(struct drm_i915_private *i915, panel->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; } - if (i915->vbt.version >= 226) { + if (i915->display.vbt.version >= 226) { u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = panel_bits(wakeup_time, panel_type, 2); @@ -1596,7 +1620,9 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, struct intel_panel *panel, enum port port) { - if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) { + enum port port_bc = DISPLAY_VER(i915) >= 11 ? 
PORT_B : PORT_C; + + if (!panel->vbt.dsi.config->dual_link || i915->display.vbt.version < 197) { panel->vbt.dsi.bl_ports = BIT(port); if (panel->vbt.dsi.config->cabc_supported) panel->vbt.dsi.cabc_ports = BIT(port); @@ -1609,11 +1635,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, panel->vbt.dsi.bl_ports = BIT(PORT_A); break; case DL_DCS_PORT_C: - panel->vbt.dsi.bl_ports = BIT(PORT_C); + panel->vbt.dsi.bl_ports = BIT(port_bc); break; default: case DL_DCS_PORT_A_AND_C: - panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C); + panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc); break; } @@ -1625,12 +1651,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915, panel->vbt.dsi.cabc_ports = BIT(PORT_A); break; case DL_DCS_PORT_C: - panel->vbt.dsi.cabc_ports = BIT(PORT_C); + panel->vbt.dsi.cabc_ports = BIT(port_bc); break; default: case DL_DCS_PORT_A_AND_C: panel->vbt.dsi.cabc_ports = - BIT(PORT_A) | BIT(PORT_C); + BIT(PORT_A) | BIT(port_bc); break; } } @@ -2051,7 +2077,7 @@ parse_compression_parameters(struct drm_i915_private *i915) u16 block_size; int index; - if (i915->vbt.version < 198) + if (i915->display.vbt.version < 198) return; params = find_section(i915, BDB_COMPRESSION_PARAMETERS); @@ -2071,7 +2097,7 @@ parse_compression_parameters(struct drm_i915_private *i915) } } - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!child->compression_enable) @@ -2205,7 +2231,7 @@ static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) return PORT_NONE; for_each_port(port) { - devdata = i915->vbt.ports[port]; + devdata = i915->display.vbt.ports[port]; if (devdata && ddc_pin == devdata->child.ddc_pin) return port; @@ -2254,7 +2280,7 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, * there are real machines (eg. Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - child = &i915->vbt.ports[p]->child; + child = &i915->display.vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; @@ -2271,7 +2297,7 @@ static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) return PORT_NONE; for_each_port(port) { - devdata = i915->vbt.ports[port]; + devdata = i915->display.vbt.ports[port]; if (devdata && aux_ch == devdata->child.aux_channel) return port; @@ -2306,7 +2332,7 @@ static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, * there are real machines (eg. 
Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - child = &i915->vbt.ports[p]->child; + child = &i915->display.vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; child->aux_channel = 0; @@ -2418,7 +2444,7 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915, [PORT_TC4] = { DVO_PORT_HDMII, DVO_PORT_DPI, -1 }, }; - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) return __dvo_port_to_port(ARRAY_SIZE(xelpd_port_mapping), ARRAY_SIZE(xelpd_port_mapping[0]), xelpd_port_mapping, @@ -2480,15 +2506,23 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 216) + if (!devdata || devdata->i915->display.vbt.version < 216) return 0; - if (devdata->i915->vbt.version >= 230) + if (devdata->i915->display.vbt.version >= 230) return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate); else return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); } +static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) +{ + if (!devdata || devdata->i915->display.vbt.version < 244) + return 0; + + return devdata->child.dp_max_lane_count + 1; +} + static void sanitize_device_type(struct intel_bios_encoder_data *devdata, enum port port) { @@ -2544,7 +2578,7 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 158) + if (!devdata || devdata->i915->display.vbt.version < 158) return -1; return devdata->child.hdmi_level_shifter_value; @@ -2552,7 +2586,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 204) + if (!devdata || devdata->i915->display.vbt.version < 204) return 0; switch (devdata->child.hdmi_max_data_rate) { @@ -2661,7 +2695,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) return; } - if (i915->vbt.ports[port]) { + if (i915->display.vbt.ports[port]) { drm_dbg_kms(&i915->drm, "More than one child device for port %c in VBT, using the first.\n", port_name(port)); @@ -2676,7 +2710,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) if (intel_bios_encoder_supports_dp(devdata)) sanitize_aux_ch(devdata, port); - i915->vbt.ports[port] = devdata; + i915->display.vbt.ports[port] = devdata; } static bool has_ddi_port_info(struct drm_i915_private *i915) @@ -2692,12 +2726,12 @@ static void parse_ddi_ports(struct drm_i915_private *i915) if (!has_ddi_port_info(i915)) return; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) parse_ddi_port(devdata); for_each_port(port) { - if (i915->vbt.ports[port]) - print_ddi_port(i915->vbt.ports[port], port); + if (i915->display.vbt.ports[port]) + print_ddi_port(i915->display.vbt.ports[port], port); } } @@ -2730,33 +2764,33 @@ parse_general_definitions(struct drm_i915_private *i915) bus_pin = defs->crt_ddc_gmbus_pin; drm_dbg_kms(&i915->drm, "crt_ddc_bus_pin: %d\n", bus_pin); if (intel_gmbus_is_valid_pin(i915, bus_pin)) - i915->vbt.crt_ddc_pin = bus_pin; + i915->display.vbt.crt_ddc_pin = bus_pin; - if (i915->vbt.version < 
106) { + if (i915->display.vbt.version < 106) { expected_size = 22; - } else if (i915->vbt.version < 111) { + } else if (i915->display.vbt.version < 111) { expected_size = 27; - } else if (i915->vbt.version < 195) { + } else if (i915->display.vbt.version < 195) { expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; - } else if (i915->vbt.version == 195) { + } else if (i915->display.vbt.version == 195) { expected_size = 37; - } else if (i915->vbt.version <= 215) { + } else if (i915->display.vbt.version <= 215) { expected_size = 38; - } else if (i915->vbt.version <= 237) { + } else if (i915->display.vbt.version <= 237) { expected_size = 39; } else { expected_size = sizeof(*child); BUILD_BUG_ON(sizeof(*child) < 39); drm_dbg(&i915->drm, "Expected child device config size for VBT version %u not known; assuming %u\n", - i915->vbt.version, expected_size); + i915->display.vbt.version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ if (defs->child_dev_size != expected_size) drm_err(&i915->drm, "Unexpected child device config size %u (expected %u for VBT version %u)\n", - defs->child_dev_size, expected_size, i915->vbt.version); + defs->child_dev_size, expected_size, i915->display.vbt.version); /* The legacy sized child device config is the minimum we need. */ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { @@ -2792,10 +2826,10 @@ parse_general_definitions(struct drm_i915_private *i915) memcpy(&devdata->child, child, min_t(size_t, defs->child_dev_size, sizeof(*child))); - list_add_tail(&devdata->node, &i915->vbt.display_devices); + list_add_tail(&devdata->node, &i915->display.vbt.display_devices); } - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) drm_dbg_kms(&i915->drm, "no child dev is parsed from VBT\n"); } @@ -2804,25 +2838,25 @@ parse_general_definitions(struct drm_i915_private *i915) static void init_vbt_defaults(struct drm_i915_private *i915) { - i915->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; + i915->display.vbt.crt_ddc_pin = GMBUS_PIN_VGADDC; /* general features */ - i915->vbt.int_tv_support = 1; - i915->vbt.int_crt_support = 1; + i915->display.vbt.int_tv_support = 1; + i915->display.vbt.int_crt_support = 1; /* driver features */ - i915->vbt.int_lvds_support = 1; + i915->display.vbt.int_lvds_support = 1; /* Default to using SSC */ - i915->vbt.lvds_use_ssc = 1; + i915->display.vbt.lvds_use_ssc = 1; /* * Core/SandyBridge/IvyBridge use alternative (120MHz) reference * clock for LVDS. */ - i915->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, - !HAS_PCH_SPLIT(i915)); + i915->display.vbt.lvds_ssc_freq = intel_bios_ssc_frequency(i915, + !HAS_PCH_SPLIT(i915)); drm_dbg_kms(&i915->drm, "Set default to SSC at %d kHz\n", - i915->vbt.lvds_ssc_freq); + i915->display.vbt.lvds_ssc_freq); } /* Common defaults which may be overridden by VBT. 
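* (init_vbt_defaults() above seeds the CRT DDC pin, the TV/CRT/LVDS support flags and the LVDS SSC reference frequency before any VBT parsing can override them.)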
*/ @@ -2883,7 +2917,7 @@ init_vbt_missing_defaults(struct drm_i915_private *i915) if (port == PORT_A) child->device_type |= DEVICE_TYPE_INTERNAL_CONNECTOR; - list_add_tail(&devdata->node, &i915->vbt.display_devices); + list_add_tail(&devdata->node, &i915->display.vbt.display_devices); drm_dbg_kms(&i915->drm, "Generating default VBT child device with type 0x%04x on port %c\n", @@ -2891,7 +2925,7 @@ } /* Bypass some minimum baseline VBT version checks */ - i915->vbt.version = 155; + i915->display.vbt.version = 155; } static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt) @@ -3078,12 +3112,12 @@ err_unmap_oprom: */ void intel_bios_init(struct drm_i915_private *i915) { - const struct vbt_header *vbt = i915->opregion.vbt; + const struct vbt_header *vbt = i915->display.opregion.vbt; struct vbt_header *oprom_vbt = NULL; const struct bdb_header *bdb; - INIT_LIST_HEAD(&i915->vbt.display_devices); - INIT_LIST_HEAD(&i915->vbt.bdb_blocks); + INIT_LIST_HEAD(&i915->display.vbt.display_devices); + INIT_LIST_HEAD(&i915->display.vbt.bdb_blocks); if (!HAS_DISPLAY(i915)) { drm_dbg_kms(&i915->drm, @@ -3111,11 +3145,11 @@ void intel_bios_init(struct drm_i915_private *i915) goto out; bdb = get_bdb_header(vbt); - i915->vbt.version = bdb->version; + i915->display.vbt.version = bdb->version; drm_dbg_kms(&i915->drm, "VBT signature \"%.*s\", BDB version %d\n", - (int)sizeof(vbt->signature), vbt->signature, i915->vbt.version); + (int)sizeof(vbt->signature), vbt->signature, i915->display.vbt.version); init_bdb_blocks(i915, bdb); @@ -3172,13 +3206,13 @@ void intel_bios_driver_remove(struct drm_i915_private *i915) struct intel_bios_encoder_data *devdata, *nd; struct bdb_block_entry *entry, *ne; - list_for_each_entry_safe(devdata, nd, &i915->vbt.display_devices, node) { + list_for_each_entry_safe(devdata, nd, &i915->display.vbt.display_devices, node) { list_del(&devdata->node); kfree(devdata->dsc); kfree(devdata); } - list_for_each_entry_safe(entry, ne, &i915->vbt.bdb_blocks, node) { + list_for_each_entry_safe(entry, ne, &i915->display.vbt.bdb_blocks, node) { list_del(&entry->node); kfree(entry); } @@ -3212,13 +3246,13 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - if (!i915->vbt.int_tv_support) + if (!i915->display.vbt.int_tv_support) return false; - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; /* @@ -3255,10 +3289,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - if (list_empty(&i915->vbt.display_devices)) + if (list_empty(&i915->display.vbt.display_devices)) return true; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; /* If the device type is not LFP, continue. @@ -3285,7 +3319,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) * additional data. Trust that if the VBT was written into * the OpRegion then they have validated the LVDS's existence.
*/ - if (i915->opregion.vbt) + if (i915->display.opregion.vbt) return true; } @@ -3304,7 +3338,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) if (WARN_ON(!has_ddi_port_info(i915))) return true; - return i915->vbt.ports[port]; + return i915->display.vbt.ports[port]; } /** @@ -3364,7 +3398,7 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915, const struct child_device_config *child; u8 dvo_port; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) @@ -3463,7 +3497,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, const struct intel_bios_encoder_data *devdata; const struct child_device_config *child; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) @@ -3494,7 +3528,7 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) @@ -3514,7 +3548,7 @@ bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; } @@ -3530,7 +3564,7 @@ bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; return devdata && devdata->child.lane_reversal; } @@ -3538,7 +3572,7 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; enum aux_ch aux_ch; if (!devdata || !devdata->child.aux_channel) { @@ -3576,7 +3610,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_C; break; case DP_AUX_D: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_D_XELPD; else if (IS_ALDERLAKE_S(i915)) aux_ch = AUX_CH_USBC3; @@ -3586,7 +3620,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_D; break; case DP_AUX_E: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_E_XELPD; else if (IS_ALDERLAKE_S(i915)) aux_ch = AUX_CH_USBC4; @@ -3594,25 +3628,25 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_E; break; case DP_AUX_F: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC1; else aux_ch = AUX_CH_F; break; case DP_AUX_G: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC2; else aux_ch = AUX_CH_G; break; case DP_AUX_H: - if (DISPLAY_VER(i915) == 13) + if (DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC3; else aux_ch = AUX_CH_H; break; case DP_AUX_I: - if (DISPLAY_VER(i915) == 13) + if 
(DISPLAY_VER(i915) >= 13) aux_ch = AUX_CH_USBC4; else aux_ch = AUX_CH_I; @@ -3632,7 +3666,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, int intel_bios_max_tmds_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_max_tmds_clock(devdata); } @@ -3641,14 +3675,14 @@ int intel_bios_max_tmds_clock(struct intel_encoder *encoder) int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_hdmi_level_shift(devdata); } int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.dp_iboost_level); @@ -3656,7 +3690,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { - if (!devdata || devdata->i915->vbt.version < 196 || !devdata->child.iboost) + if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; return translate_iboost(devdata->child.hdmi_iboost_level); @@ -3665,15 +3699,23 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; return _intel_bios_dp_max_link_rate(devdata); } +int intel_bios_dp_max_lane_count(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; + + return _intel_bios_dp_max_lane_count(devdata); +} + int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; if (!devdata || !devdata->child.ddc_pin) return 0; @@ -3683,16 +3725,16 @@ int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->vbt.version >= 195 && devdata->child.dp_usb_type_c; + return devdata->i915->display.vbt.version >= 195 && devdata->child.dp_usb_type_c; } bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata) { - return devdata->i915->vbt.version >= 209 && devdata->child.tbt; + return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; } const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { - return i915->vbt.ports[port]; + return i915->display.vbt.ports[port]; } diff --git 
a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index e47582b0de0a..e375405a7828 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -258,6 +258,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, int intel_bios_max_tmds_clock(struct intel_encoder *encoder); int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); +int intel_bios_dp_max_lane_count(struct intel_encoder *encoder); int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 79269d2c476b..4ace026b29bd 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -5,15 +5,17 @@ #include <drm/drm_atomic_state_helper.h> +#include "i915_drv.h" #include "i915_reg.h" #include "i915_utils.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_cdclk.h" +#include "intel_display_core.h" #include "intel_display_types.h" +#include "skl_watermark.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" -#include "intel_pm.h" /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { @@ -137,6 +139,42 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, return 0; } +static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv, + struct intel_qgv_point *sp, int point) +{ + u32 val, val2; + u16 dclk; + + val = intel_uncore_read(&dev_priv->uncore, + MTL_MEM_SS_INFO_QGV_POINT_LOW(point)); + val2 = intel_uncore_read(&dev_priv->uncore, + MTL_MEM_SS_INFO_QGV_POINT_HIGH(point)); + dclk = REG_FIELD_GET(MTL_DCLK_MASK, val); + sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000); + sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val); + sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val); + + sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2); + sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2); + + sp->t_rc = sp->t_rp + sp->t_ras; + + return 0; +} + +static int +intel_read_qgv_point_info(struct drm_i915_private *dev_priv, + struct intel_qgv_point *sp, + int point) +{ + if (DISPLAY_VER(dev_priv) >= 14) + return mtl_read_qgv_point_info(dev_priv, sp, point); + else if (IS_DG1(dev_priv)) + return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point); + else + return icl_pcode_read_qgv_point_info(dev_priv, sp, point); +} + static int icl_get_qgv_points(struct drm_i915_private *dev_priv, struct intel_qgv_info *qi, bool is_y_tile) @@ -147,7 +185,32 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) { + switch (dram_info->type) { + case INTEL_DRAM_DDR4: + qi->t_bl = 4; + qi->max_numchannels = 2; + qi->channel_width = 64; + qi->deinterleave = 2; + break; + case INTEL_DRAM_DDR5: + qi->t_bl = 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = 2; + break; + case INTEL_DRAM_LPDDR4: + case INTEL_DRAM_LPDDR5: + qi->t_bl = 16; + qi->max_numchannels = 8; + qi->channel_width = 16; + qi->deinterleave = 4; + break; + default: + MISSING_CASE(dram_info->type); + return -EINVAL; + } + } else if (DISPLAY_VER(dev_priv) >= 12) { switch (dram_info->type) { case 
INTEL_DRAM_DDR4: qi->t_bl = is_y_tile ? 8 : 4; @@ -181,7 +244,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->max_numchannels = 1; break; } - else if (DISPLAY_VER(dev_priv) == 11) { + } else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; qi->max_numchannels = 1; } @@ -193,11 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, for (i = 0; i < qi->num_points; i++) { struct intel_qgv_point *sp = &qi->points[i]; - if (IS_DG1(dev_priv)) - ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i); - else - ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i); - + ret = intel_read_qgv_point_info(dev_priv, sp, i); if (ret) return ret; @@ -284,6 +343,13 @@ static const struct intel_sa_info adlp_sa_info = { .derating = 20, }; +static const struct intel_sa_info mtl_sa_info = { + .deburst = 32, + .deprogbwlimit = 38, /* GB/s */ + .displayrtids = 256, + .derating = 20, +}; + static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; @@ -292,7 +358,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; - int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); @@ -308,7 +374,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; int clpchgroup; int j; @@ -346,9 +412,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel * as it will fail and pointless anyway. 
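* With only a single QGV point there is nothing to switch between, hence the code below reports I915_SAGV_NOT_CONTROLLED instead of I915_SAGV_ENABLED.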
*/ if (qi.num_points == 1) - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->sagv_status = I915_SAGV_ENABLED; + dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } @@ -363,7 +429,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel int dclk_max; int maxdebw, peakbw; int clperchgroup; - int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); int i, ret; ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); @@ -399,20 +465,22 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; struct intel_bw_info *bi_next; int clpchgroup; int j; - if (i < num_groups - 1) - bi_next = &dev_priv->max_bw[i + 1]; - clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; - if (i < num_groups - 1 && clpchgroup < clperchgroup) - bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; - else - bi_next->num_planes = 0; + if (i < num_groups - 1) { + bi_next = &dev_priv->display.bw.max[i + 1]; + + if (clpchgroup < clperchgroup) + bi_next->num_planes = (ipqdepth - clpchgroup) / + clpchgroup + 1; + else + bi_next->num_planes = 0; + } bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; @@ -456,9 +524,9 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel * as it will fail and pointless anyway. */ if (qi.num_points == 1) - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->sagv_status = I915_SAGV_ENABLED; + dev_priv->display.sagv.status = I915_SAGV_ENABLED; return 0; } @@ -466,7 +534,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel static void dg2_get_bw_info(struct drm_i915_private *i915) { unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000; - int num_groups = ARRAY_SIZE(i915->max_bw); + int num_groups = ARRAY_SIZE(i915->display.bw.max); int i; /* @@ -477,7 +545,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) * whereas DG2-G11 platforms have 38 GB/s. 
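* Since the bandwidth is fixed, each bw group below gets a single dummy QGV point carrying the derated value, and SAGV is left uncontrolled.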
*/ for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &i915->max_bw[i]; + struct intel_bw_info *bi = &i915->display.bw.max[i]; bi->num_planes = 1; /* Need only one dummy QGV point per group */ @@ -485,7 +553,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) bi->deratedbw[0] = deratedbw; } - i915->sagv_status = I915_SAGV_NOT_CONTROLLED; + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; } static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, @@ -498,9 +566,9 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) { const struct intel_bw_info *bi = - &dev_priv->max_bw[i]; + &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when @@ -526,9 +594,9 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { + for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) { const struct intel_bw_info *bi = - &dev_priv->max_bw[i]; + &dev_priv->display.bw.max[i]; /* * Pcode will not expose all QGV points when @@ -541,14 +609,14 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, return bi->deratedbw[qgv_point]; } - return dev_priv->max_bw[0].deratedbw[qgv_point]; + return dev_priv->display.bw.max[0].deratedbw[qgv_point]; } static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { const struct intel_bw_info *bi = - &dev_priv->max_bw[0]; + &dev_priv->display.bw.max[0]; return bi->psf_bw[psf_gv_point]; } @@ -558,7 +626,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_DG2(dev_priv)) + if (DISPLAY_VER(dev_priv) >= 14) + tgl_get_bw_info(dev_priv, &mtl_sa_info); + else if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); else if (IS_ALDERLAKE_P(dev_priv)) tgl_get_bw_info(dev_priv, &adlp_sa_info); @@ -667,7 +737,7 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } @@ -678,7 +748,7 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj); return to_intel_bw_state(bw_state); } @@ -689,7 +759,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj); + bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); @@ -896,8 +966,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, static u16 icl_qgv_points_mask(struct drm_i915_private *i915) { - unsigned int num_psf_gv_points = i915->max_bw[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->max_bw[0].num_qgv_points; + unsigned int num_psf_gv_points = 
i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; u16 qgv_points = 0, psf_points = 0; /* @@ -970,8 +1040,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) int i, ret; u16 qgv_points = 0, psf_points = 0; unsigned int max_bw_point = 0, max_bw = 0; - unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points; - unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points; + unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points; + unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points; bool changed = false; /* FIXME earlier gens need some checks too */ @@ -1126,7 +1196,7 @@ int intel_bw_init(struct drm_i915_private *dev_priv) if (!state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj, + intel_atomic_global_obj_init(dev_priv, &dev_priv->display.bw.obj, &state->base, &intel_bw_funcs); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 6e80162632dd..ed05070b7307 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -79,26 +79,26 @@ struct intel_cdclk_funcs { void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { - dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config); + dev_priv->display.funcs.cdclk->get_cdclk(dev_priv, cdclk_config); } static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe); + dev_priv->display.funcs.cdclk->set_cdclk(dev_priv, cdclk_config, pipe); } static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_config) { - return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config); + return dev_priv->display.funcs.cdclk->modeset_calc_cdclk(cdclk_config); } static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { - return dev_priv->cdclk_funcs->calc_voltage_level(cdclk); + return dev_priv->display.funcs.cdclk->calc_voltage_level(cdclk); } static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, @@ -548,7 +548,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) else default_credits = PFI_CREDIT(8); - if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) { + if (dev_priv->display.cdclk.hw.cdclk >= dev_priv->czclk_freq) { /* CHV suggested value is 31 or 63 */ if (IS_CHERRYVIEW(dev_priv)) credits = PFI_CREDIT_63; @@ -1026,7 +1026,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) drm_err(&dev_priv->drm, "DPLL0 not locked\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; /* We'll want to keep using the current vco from now on. 
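* skl_set_preferred_cdclk_vco() below records it so that subsequent cdclk changes keep reusing the PLL frequency that is already locked.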
*/ skl_set_preferred_cdclk_vco(dev_priv, vco); @@ -1040,7 +1040,7 @@ static void skl_dpll0_disable(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, @@ -1049,7 +1049,7 @@ static u32 skl_cdclk_freq_sel(struct drm_i915_private *dev_priv, switch (cdclk) { default: drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->cdclk.hw.bypass); + cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 308571: @@ -1098,13 +1098,13 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, freq_select = skl_cdclk_freq_sel(dev_priv, cdclk, vco); - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); - if (dev_priv->cdclk.hw.vco != vco) { + if (dev_priv->display.cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); @@ -1116,7 +1116,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); intel_de_posting_read(dev_priv, CDCLK_CTL); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); /* Wa Display #1183: skl,kbl,cfl */ @@ -1151,11 +1151,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) goto sanitize; intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) + if (dev_priv->display.cdclk.hw.vco == 0 || + dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1166,7 +1166,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) */ cdctl = intel_de_read(dev_priv, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); + skl_cdclk_decimal(dev_priv->display.cdclk.hw.cdclk); if (cdctl == expected) /* All well; nothing to sanitize */ return; @@ -1175,9 +1175,9 @@ sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; + dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = -1; } static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -1186,19 +1186,19 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) skl_sanitize_cdclk(dev_priv); - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) { + if (dev_priv->display.cdclk.hw.cdclk != 0 && + dev_priv->display.cdclk.hw.vco != 0) { /* * Use the current vco as our initial * guess as to what the preferred vco is. 
*/ if (dev_priv->skl_preferred_vco_freq == 0) skl_set_preferred_cdclk_vco(dev_priv, - dev_priv->cdclk.hw.vco); + dev_priv->display.cdclk.hw.vco); return; } - cdclk_config = dev_priv->cdclk.hw; + cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.vco = dev_priv->skl_preferred_vco_freq; if (cdclk_config.vco == 0) @@ -1211,7 +1211,7 @@ static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; @@ -1352,35 +1352,35 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = { static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk >= min_cdclk) return table[i].cdclk; drm_WARN(&dev_priv->drm, 1, "Cannot satisfy minimum cdclk %d with refclk %u\n", - min_cdclk, dev_priv->cdclk.hw.ref); + min_cdclk, dev_priv->display.cdclk.hw.ref); return 0; } static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; - if (cdclk == dev_priv->cdclk.hw.bypass) + if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) - return dev_priv->cdclk.hw.ref * table[i].ratio; + return dev_priv->display.cdclk.hw.ref * table[i].ratio; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->cdclk.hw.ref); + cdclk, dev_priv->display.cdclk.hw.ref); return 0; } @@ -1554,12 +1554,12 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); intel_de_rmw(dev_priv, BXT_DE_PLL_CTL, BXT_DE_PLL_RATIO_MASK, BXT_DE_PLL_RATIO(ratio)); @@ -1571,7 +1571,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) @@ -1583,12 +1583,12 @@ static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv) if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL unlock\n"); - dev_priv->cdclk.hw.vco = 0; + dev_priv->display.cdclk.hw.vco = 0; } static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; val = 
ICL_CDCLK_PLL_RATIO(ratio); @@ -1601,12 +1601,12 @@ static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) drm_err(&dev_priv->drm, "timeout waiting for CDCLK PLL lock\n"); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) { - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->display.cdclk.hw.ref); u32 val; /* Write PLL ratio without disabling */ @@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) val &= ~BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); - dev_priv->cdclk.hw.vco = vco; + dev_priv->display.cdclk.hw.vco = vco; } static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1655,7 +1655,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: drm_WARN_ON(&dev_priv->drm, - cdclk != dev_priv->cdclk.hw.bypass); + cdclk != dev_priv->display.cdclk.hw.bypass); drm_WARN_ON(&dev_priv->drm, vco != 0); fallthrough; case 2: @@ -1672,19 +1672,19 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, int cdclk) { - const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table; int i; - if (cdclk == dev_priv->cdclk.hw.bypass) + if (cdclk == dev_priv->display.cdclk.hw.bypass) return 0; for (i = 0; table[i].refclk; i++) - if (table[i].refclk == dev_priv->cdclk.hw.ref && + if (table[i].refclk == dev_priv->display.cdclk.hw.ref && table[i].cdclk == cdclk) return table[i].waveform; drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->cdclk.hw.ref); + cdclk, dev_priv->display.cdclk.hw.ref); return 0xffff; } @@ -1721,22 +1721,22 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, return; } - if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) { - if (dev_priv->cdclk.hw.vco != vco) + if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) { + if (dev_priv->display.cdclk.hw.vco != vco) adlp_cdclk_pll_crawl(dev_priv, vco); } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) icl_cdclk_pll_disable(dev_priv); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) icl_cdclk_pll_enable(dev_priv, vco); } else { - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != 0 && + dev_priv->display.cdclk.hw.vco != vco) bxt_de_pll_disable(dev_priv); - if (dev_priv->cdclk.hw.vco != vco) + if (dev_priv->display.cdclk.hw.vco != vco) bxt_de_pll_enable(dev_priv, vco); } @@ -1803,7 +1803,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * Can't read out the voltage level :( * Let's just assume everything is as expected. 
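* The value requested in cdclk_config is therefore copied straight into the hw state below.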
*/ - dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level; + dev_priv->display.cdclk.hw.voltage_level = cdclk_config->voltage_level; } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) @@ -1812,10 +1812,10 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) int cdclk, clock, vco; intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) + if (dev_priv->display.cdclk.hw.vco == 0 || + dev_priv->display.cdclk.hw.cdclk == dev_priv->display.cdclk.hw.bypass) goto sanitize; /* DPLL okay; verify the cdclock @@ -1833,32 +1833,32 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); /* Make sure this is a legal cdclk value for the platform */ - cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk); - if (cdclk != dev_priv->cdclk.hw.cdclk) + cdclk = bxt_calc_cdclk(dev_priv, dev_priv->display.cdclk.hw.cdclk); + if (cdclk != dev_priv->display.cdclk.hw.cdclk) goto sanitize; /* Make sure the VCO is correct for the cdclk */ vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - if (vco != dev_priv->cdclk.hw.vco) + if (vco != dev_priv->display.cdclk.hw.vco) goto sanitize; expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ if (has_cdclk_squasher(dev_priv)) - clock = dev_priv->cdclk.hw.vco / 2; + clock = dev_priv->display.cdclk.hw.vco / 2; else - clock = dev_priv->cdclk.hw.cdclk; + clock = dev_priv->display.cdclk.hw.cdclk; expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, - dev_priv->cdclk.hw.vco); + dev_priv->display.cdclk.hw.vco); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - dev_priv->cdclk.hw.cdclk >= 500000) + dev_priv->display.cdclk.hw.cdclk >= 500000) expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; if (cdctl == expected) @@ -1869,10 +1869,10 @@ sanitize: drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; + dev_priv->display.cdclk.hw.cdclk = 0; /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; + dev_priv->display.cdclk.hw.vco = -1; } static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) @@ -1881,11 +1881,11 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) bxt_sanitize_cdclk(dev_priv); - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) + if (dev_priv->display.cdclk.hw.cdclk != 0 && + dev_priv->display.cdclk.hw.vco != 0) return; - cdclk_config = dev_priv->cdclk.hw; + cdclk_config = dev_priv->display.cdclk.hw; /* * FIXME: @@ -1902,7 +1902,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->display.cdclk.hw; cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; @@ -1916,7 +1916,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) * intel_cdclk_init_hw - Initialize CDCLK hardware * @i915: i915 device * - * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and + * Initialize CDCLK. 
This consists mainly of initializing dev_priv->display.cdclk.hw and * sanitizing the state of the hardware if needed. This is generally done only * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. @@ -2077,10 +2077,10 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, { struct intel_encoder *encoder; - if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) + if (!intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.funcs.cdclk->set_cdclk)) return; intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); @@ -2098,12 +2098,12 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, * functions use cdclk. Not all platforms/ports do, * but we'll lock them all for simplicity. */ - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&dev_priv->display.gmbus.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, - &dev_priv->gmbus_mutex); + &dev_priv->display.gmbus.mutex); } intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); @@ -2113,7 +2113,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, mutex_unlock(&intel_dp->aux.hw_mutex); } - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&dev_priv->display.gmbus.mutex); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2124,9 +2124,9 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_audio_cdclk_change_post(dev_priv); if (drm_WARN(&dev_priv->drm, - intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), + intel_cdclk_changed(&dev_priv->display.cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "[hw state]"); intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); } } @@ -2300,7 +2300,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); /* - * HACK. Currently for TGL platforms we calculate + * HACK. Currently for TGL/DG2 platforms we calculate * min_cdclk initially based on pixel_rate divided * by 2, also accounting for plane requirements, * however in some cases the lowest possible CDCLK @@ -2308,14 +2308,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * Explicitly stating here that this seems to be currently * rather a hack than a final solution. */ - if (IS_TIGERLAKE(dev_priv)) { + if (IS_TIGERLAKE(dev_priv) || IS_DG2(dev_priv)) { /* * Clamp to max_cdclk_freq in case pixel rate is higher, * in order not to break 8K, but still leave the W/A in place.
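* Effectively min_cdclk = max(min_cdclk, min(pixel_rate, max_cdclk_freq)); the max_t()/min_t() pair below spells that out.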
*/ min_cdclk = max_t(int, min_cdclk, min_t(int, crtc_state->pixel_rate, - dev_priv->max_cdclk_freq)); + dev_priv->display.cdclk.max_cdclk_freq)); } return min_cdclk; @@ -2368,10 +2368,10 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) for_each_pipe(dev_priv, pipe) min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); - if (min_cdclk > dev_priv->max_cdclk_freq) { + if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) { drm_dbg_kms(&dev_priv->drm, "required cdclk (%d kHz) exceeds max (%d kHz)\n", - min_cdclk, dev_priv->max_cdclk_freq); + min_cdclk, dev_priv->display.cdclk.max_cdclk_freq); return -EINVAL; } @@ -2643,7 +2643,7 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_global_state *cdclk_state; - cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj); + cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.cdclk.obj); if (IS_ERR(cdclk_state)) return ERR_CAST(cdclk_state); @@ -2693,7 +2693,7 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv) if (!cdclk_state) return -ENOMEM; - intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj, + intel_atomic_global_obj_init(dev_priv, &dev_priv->display.cdclk.obj, &cdclk_state->base, &intel_cdclk_funcs); return 0; @@ -2799,7 +2799,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { - int max_cdclk_freq = dev_priv->max_cdclk_freq; + int max_cdclk_freq = dev_priv->display.cdclk.max_cdclk_freq; if (DISPLAY_VER(dev_priv) >= 10) return 2 * max_cdclk_freq; @@ -2825,19 +2825,19 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { if (IS_JSL_EHL(dev_priv)) { - if (dev_priv->cdclk.hw.ref == 24000) - dev_priv->max_cdclk_freq = 552000; + if (dev_priv->display.cdclk.hw.ref == 24000) + dev_priv->display.cdclk.max_cdclk_freq = 552000; else - dev_priv->max_cdclk_freq = 556800; + dev_priv->display.cdclk.max_cdclk_freq = 556800; } else if (DISPLAY_VER(dev_priv) >= 11) { - if (dev_priv->cdclk.hw.ref == 24000) - dev_priv->max_cdclk_freq = 648000; + if (dev_priv->display.cdclk.hw.ref == 24000) + dev_priv->display.cdclk.max_cdclk_freq = 648000; else - dev_priv->max_cdclk_freq = 652800; + dev_priv->display.cdclk.max_cdclk_freq = 652800; } else if (IS_GEMINILAKE(dev_priv)) { - dev_priv->max_cdclk_freq = 316800; + dev_priv->display.cdclk.max_cdclk_freq = 316800; } else if (IS_BROXTON(dev_priv)) { - dev_priv->max_cdclk_freq = 624000; + dev_priv->display.cdclk.max_cdclk_freq = 624000; } else if (DISPLAY_VER(dev_priv) == 9) { u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; @@ -2859,7 +2859,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) else max_cdclk = 308571; - dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); + dev_priv->display.cdclk.max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow @@ -2868,26 +2868,26 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) * available? PCI ID, VTB, something else? 
*/ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) - dev_priv->max_cdclk_freq = 450000; + dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BDW_ULX(dev_priv)) - dev_priv->max_cdclk_freq = 450000; + dev_priv->display.cdclk.max_cdclk_freq = 450000; else if (IS_BDW_ULT(dev_priv)) - dev_priv->max_cdclk_freq = 540000; + dev_priv->display.cdclk.max_cdclk_freq = 540000; else - dev_priv->max_cdclk_freq = 675000; + dev_priv->display.cdclk.max_cdclk_freq = 675000; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->max_cdclk_freq = 320000; + dev_priv->display.cdclk.max_cdclk_freq = 320000; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->max_cdclk_freq = 400000; + dev_priv->display.cdclk.max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ - dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk; + dev_priv->display.cdclk.max_cdclk_freq = dev_priv->display.cdclk.hw.cdclk; } dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", - dev_priv->max_cdclk_freq); + dev_priv->display.cdclk.max_cdclk_freq); drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", dev_priv->max_dotclk_freq); @@ -2901,7 +2901,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) */ void intel_update_cdclk(struct drm_i915_private *dev_priv) { - intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw); + intel_cdclk_get_cdclk(dev_priv, &dev_priv->display.cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): @@ -2911,7 +2911,7 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_de_write(dev_priv, GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); + DIV_ROUND_UP(dev_priv->display.cdclk.hw.cdclk, 1000)); } static int dg1_rawclk(struct drm_i915_private *dev_priv) @@ -3036,6 +3036,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) freq = dg1_rawclk(dev_priv); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP) + /* + * MTL always uses a 38.4 MHz rawclk. The bspec tells us + * "RAWCLK_FREQ defaults to the values for 38.4 and does + * not need to be programmed." 
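+ * The value is returned in kHz, so 38400 here is the 38.4 MHz rawclk.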
+ */ + freq = 38400; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) @@ -3187,78 +3194,78 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = { void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_DG2(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = dg2_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; /* Wa_22011320316:adl-p[a0] */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - dev_priv->cdclk.table = adlp_a_step_cdclk_table; + dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; else - dev_priv->cdclk.table = adlp_cdclk_table; + dev_priv->display.cdclk.table = adlp_cdclk_table; } else if (IS_ROCKETLAKE(dev_priv)) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = rkl_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = rkl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 12) { - dev_priv->cdclk_funcs = &tgl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_JSL_EHL(dev_priv)) { - dev_priv->cdclk_funcs = &ehl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &ehl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->cdclk_funcs = &icl_cdclk_funcs; - dev_priv->cdclk.table = icl_cdclk_table; + dev_priv->display.funcs.cdclk = &icl_cdclk_funcs; + dev_priv->display.cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - dev_priv->cdclk_funcs = &bxt_cdclk_funcs; + dev_priv->display.funcs.cdclk = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) - dev_priv->cdclk.table = glk_cdclk_table; + dev_priv->display.cdclk.table = glk_cdclk_table; else - dev_priv->cdclk.table = bxt_cdclk_table; + dev_priv->display.cdclk.table = bxt_cdclk_table; } else if (DISPLAY_VER(dev_priv) == 9) { - dev_priv->cdclk_funcs = &skl_cdclk_funcs; + dev_priv->display.funcs.cdclk = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { - dev_priv->cdclk_funcs = &bdw_cdclk_funcs; + dev_priv->display.funcs.cdclk = &bdw_cdclk_funcs; } else if (IS_HASWELL(dev_priv)) { - dev_priv->cdclk_funcs = &hsw_cdclk_funcs; + dev_priv->display.funcs.cdclk = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &chv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &vlv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &vlv_cdclk_funcs; } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_IRONLAKE(dev_priv)) { - dev_priv->cdclk_funcs = &ilk_cdclk_funcs; + dev_priv->display.funcs.cdclk = &ilk_cdclk_funcs; } else if (IS_GM45(dev_priv)) { - dev_priv->cdclk_funcs = &gm45_cdclk_funcs; + dev_priv->display.funcs.cdclk = &gm45_cdclk_funcs; } else if (IS_G45(dev_priv)) { - dev_priv->cdclk_funcs = &g33_cdclk_funcs; + dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I965GM(dev_priv)) { - dev_priv->cdclk_funcs = &i965gm_cdclk_funcs; + 
dev_priv->display.funcs.cdclk = &i965gm_cdclk_funcs; } else if (IS_I965G(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_PINEVIEW(dev_priv)) { - dev_priv->cdclk_funcs = &pnv_cdclk_funcs; + dev_priv->display.funcs.cdclk = &pnv_cdclk_funcs; } else if (IS_G33(dev_priv)) { - dev_priv->cdclk_funcs = &g33_cdclk_funcs; + dev_priv->display.funcs.cdclk = &g33_cdclk_funcs; } else if (IS_I945GM(dev_priv)) { - dev_priv->cdclk_funcs = &i945gm_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i945gm_cdclk_funcs; } else if (IS_I945G(dev_priv)) { - dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + dev_priv->display.funcs.cdclk = &fixed_400mhz_cdclk_funcs; } else if (IS_I915GM(dev_priv)) { - dev_priv->cdclk_funcs = &i915gm_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i915gm_cdclk_funcs; } else if (IS_I915G(dev_priv)) { - dev_priv->cdclk_funcs = &i915g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i915g_cdclk_funcs; } else if (IS_I865G(dev_priv)) { - dev_priv->cdclk_funcs = &i865g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i865g_cdclk_funcs; } else if (IS_I85X(dev_priv)) { - dev_priv->cdclk_funcs = &i85x_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i85x_cdclk_funcs; } else if (IS_I845G(dev_priv)) { - dev_priv->cdclk_funcs = &i845g_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i845g_cdclk_funcs; } else if (IS_I830(dev_priv)) { - dev_priv->cdclk_funcs = &i830_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; } - if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs, + if (drm_WARN(&dev_priv->drm, !dev_priv->display.funcs.cdclk, "Unknown platform. Assuming i830\n")) - dev_priv->cdclk_funcs = &i830_cdclk_funcs; + dev_priv->display.funcs.cdclk = &i830_cdclk_funcs; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index b535cf6a7d9e..c674879a84a5 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -77,9 +77,9 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state); #define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base) #define intel_atomic_get_old_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) #define intel_atomic_get_new_cdclk_state(state) \ - to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) + to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj)) int intel_cdclk_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 9583d17e858d..6bda4274eae9 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -26,6 +26,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpll.h" +#include "intel_dsb.h" #include "vlv_dsi_pll.h" struct intel_color_funcs { @@ -1167,22 +1168,22 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->color_funcs->load_luts(crtc_state); + dev_priv->display.funcs.color->load_luts(crtc_state); } void intel_color_commit_noarm(const struct intel_crtc_state 
*crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (dev_priv->color_funcs->color_commit_noarm) - dev_priv->color_funcs->color_commit_noarm(crtc_state); + if (dev_priv->display.funcs.color->color_commit_noarm) + dev_priv->display.funcs.color->color_commit_noarm(crtc_state); } void intel_color_commit_arm(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->color_funcs->color_commit_arm(crtc_state); + dev_priv->display.funcs.color->color_commit_arm(crtc_state); } static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) @@ -1238,15 +1239,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - return dev_priv->color_funcs->color_check(crtc_state); + return dev_priv->display.funcs.color->color_check(crtc_state); } void intel_color_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (dev_priv->color_funcs->read_luts) - dev_priv->color_funcs->read_luts(crtc_state); + if (dev_priv->display.funcs.color->read_luts) + dev_priv->display.funcs.color->read_luts(crtc_state); } static bool need_plane_update(struct intel_plane *plane, @@ -2225,28 +2226,28 @@ void intel_color_init(struct intel_crtc *crtc) if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->color_funcs = &chv_color_funcs; + dev_priv->display.funcs.color = &chv_color_funcs; } else if (DISPLAY_VER(dev_priv) >= 4) { - dev_priv->color_funcs = &i965_color_funcs; + dev_priv->display.funcs.color = &i965_color_funcs; } else { - dev_priv->color_funcs = &i9xx_color_funcs; + dev_priv->display.funcs.color = &i9xx_color_funcs; } } else { if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->color_funcs = &icl_color_funcs; + dev_priv->display.funcs.color = &icl_color_funcs; else if (DISPLAY_VER(dev_priv) == 10) - dev_priv->color_funcs = &glk_color_funcs; + dev_priv->display.funcs.color = &glk_color_funcs; else if (DISPLAY_VER(dev_priv) == 9) - dev_priv->color_funcs = &skl_color_funcs; + dev_priv->display.funcs.color = &skl_color_funcs; else if (DISPLAY_VER(dev_priv) == 8) - dev_priv->color_funcs = &bdw_color_funcs; + dev_priv->display.funcs.color = &bdw_color_funcs; else if (DISPLAY_VER(dev_priv) == 7) { if (IS_HASWELL(dev_priv)) - dev_priv->color_funcs = &hsw_color_funcs; + dev_priv->display.funcs.color = &hsw_color_funcs; else - dev_priv->color_funcs = &ivb_color_funcs; + dev_priv->display.funcs.color = &ivb_color_funcs; } else - dev_priv->color_funcs = &ilk_color_funcs; + dev_priv->display.funcs.color = &ilk_color_funcs; } drm_crtc_enable_color_mgmt(&crtc->base, diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 1dcc268927a2..6d5cbeb8df4d 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -229,7 +229,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; - prop = dev_priv->force_audio_property; + prop = dev_priv->display.properties.force_audio; if (prop == NULL) { prop = drm_property_create_enum(dev, 0, "audio", @@ -238,7 +238,7 @@ intel_attach_force_audio_property(struct drm_connector *connector) if (prop == NULL) return; - dev_priv->force_audio_property = prop; + dev_priv->display.properties.force_audio = prop; } 
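/* The property is created once, cached in dev_priv->display.properties, and then shared by every connector that attaches it. */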
drm_object_attach_property(&connector->base, prop, 0); } @@ -256,7 +256,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_property *prop; - prop = dev_priv->broadcast_rgb_property; + prop = dev_priv->display.properties.broadcast_rgb; if (prop == NULL) { prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Broadcast RGB", @@ -265,7 +265,7 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector) if (prop == NULL) return; - dev_priv->broadcast_rgb_property = prop; + dev_priv->display.properties.broadcast_rgb = prop; } drm_object_attach_property(&connector->base, prop, 0); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 6a3893c8ff22..4a8ff2f97608 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -46,6 +46,7 @@ #include "intel_gmbus.h" #include "intel_hotplug.h" #include "intel_pch_display.h" +#include "intel_pch_refclk.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ @@ -444,6 +445,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, /* FDI must always be 2.7 GHz */ pipe_config->port_clock = 135000 * 2; + adjusted_mode->crtc_clock = lpt_iclkip(pipe_config); + return 0; } @@ -643,9 +646,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct i2c_adapter *i2c; bool ret = false; - BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); - - i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); edid = intel_crt_get_edid(connector, i2c); if (edid) { @@ -931,7 +932,7 @@ static int intel_crt_get_modes(struct drm_connector *connector) wakeref = intel_display_power_get(dev_priv, intel_encoder->power_domain); - i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->display.vbt.crt_ddc_pin); ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev_priv)) goto out; @@ -1110,8 +1111,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv) u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE; - dev_priv->fdi_rx_config = intel_de_read(dev_priv, - FDI_RX_CTL(PIPE_A)) & fdi_config; + dev_priv->display.fdi.rx_config = intel_de_read(dev_priv, + FDI_RX_CTL(PIPE_A)) & fdi_config; } intel_crt_reset(&crt->base.base); diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 4ca6e9493ff2..e9212f69c360 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -134,8 +134,8 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state) plane->base.base.id, plane->base.name, fb->base.id, fb->width, fb->height, &fb->format->format, fb->modifier, str_yes_no(plane_state->uapi.visible)); - drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", - plane_state->hw.rotation, plane_state->scaler_id); + drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d, scaling_filter: %d\n", + plane_state->hw.rotation, plane_state->scaler_id, plane_state->hw.scaling_filter); if (plane_state->uapi.visible) drm_dbg_kms(&i915->drm, "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", @@ -262,10 +262,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, if (DISPLAY_VER(i915) >= 9) 
drm_dbg_kms(&i915->drm, - "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", + "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d, scaling_filter: %d\n", crtc->num_scalers, pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); + pipe_config->scaler_state.scaler_id, + pipe_config->hw.scaling_filter); if (HAS_GMCH(i915)) drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 16ac560eab49..87899e89b3a7 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -19,9 +19,9 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" +#include "skl_watermark.h" /* Cursor formats */ static const u32 intel_cursor_formats[] = { diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2330604b0bcc..da8472cdc135 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -57,6 +57,7 @@ #include "intel_lspcon.h" #include "intel_pps.h" #include "intel_psr.h" +#include "intel_quirks.h" #include "intel_snps_phy.h" #include "intel_sprite.h" #include "intel_tc.h" @@ -323,28 +324,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } } -int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) -{ - int dotclock; - - if (intel_crtc_has_dp_encoder(pipe_config)) - dotclock = intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->dp_m_n); - else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) - dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp; - else - dotclock = pipe_config->port_clock; - - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && - !intel_crtc_has_dp_encoder(pipe_config)) - dotclock *= 2; - - if (pipe_config->pixel_multiplier) - dotclock /= pipe_config->pixel_multiplier; - - return dotclock; -} - static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { /* CRT dotclock is determined via other means */ @@ -631,7 +610,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), ctl); - if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && + if (intel_has_quirk(dev_priv, QUIRK_INCREASE_DDI_DISABLED_TIME) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { drm_dbg_kms(&dev_priv->drm, "Quirk Increase DDI disabled time\n"); @@ -1425,7 +1404,7 @@ hsw_set_signal_levels(struct intel_encoder *encoder, static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_sel_mask, u32 clk_sel, u32 clk_off) { - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, clk_sel_mask, clk_sel); @@ -1435,17 +1414,17 @@ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg, */ intel_de_rmw(i915, reg, clk_off, 0); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg, u32 clk_off) { - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, reg, 0, clk_off); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg, @@ -1720,12 +1699,12 @@ static void 
icl_ddi_tc_enable_clock(struct intel_encoder *encoder, intel_de_write(i915, DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port), 0); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) @@ -1734,12 +1713,12 @@ static void icl_ddi_tc_disable_clock(struct intel_encoder *encoder) enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); enum port port = encoder->port; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, ICL_DPCLKA_CFGCR0, 0, ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); intel_de_write(i915, DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } @@ -1824,7 +1803,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder, if (drm_WARN_ON(&i915->drm, !pll)) return; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, DPLL_CTRL2_DDI_CLK_OFF(port) | @@ -1832,7 +1811,7 @@ static void skl_ddi_enable_clock(struct intel_encoder *encoder, DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static void skl_ddi_disable_clock(struct intel_encoder *encoder) @@ -1840,12 +1819,12 @@ static void skl_ddi_disable_clock(struct intel_encoder *encoder) struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum port port = encoder->port; - mutex_lock(&i915->dpll.lock); + mutex_lock(&i915->display.dpll.lock); intel_de_rmw(i915, DPLL_CTRL2, 0, DPLL_CTRL2_DDI_CLK_OFF(port)); - mutex_unlock(&i915->dpll.lock); + mutex_unlock(&i915->display.dpll.lock); } static bool skl_ddi_is_clock_enabled(struct intel_encoder *encoder) @@ -2691,10 +2670,14 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); - intel_ddi_disable_pipe_clock(old_crtc_state); + if (DISPLAY_VER(dev_priv) < 12) + intel_ddi_disable_pipe_clock(old_crtc_state); intel_disable_ddi_buf(encoder, old_crtc_state); + if (DISPLAY_VER(dev_priv) >= 12) + intel_ddi_disable_pipe_clock(old_crtc_state); + intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, fetch_and_zero(&dig_port->ddi_io_wakeref)); @@ -2862,6 +2845,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); + u32 buf_ctl; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, @@ -2919,8 +2904,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, * On ADL_P the PHY link rate and lane count must be programmed but * these are both 0 for HDMI. 
*/ - intel_de_write(dev_priv, DDI_BUF_CTL(port), - dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE); + buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE; + if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) { + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port)); + buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; + } + intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); intel_audio_codec_enable(encoder, crtc_state, conn_state); } @@ -3611,10 +3600,22 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder, static bool intel_ddi_initial_fastset_check(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { - if (intel_crtc_has_dp_encoder(crtc_state)) - return intel_dp_initial_fastset_check(encoder, crtc_state); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + bool fastset = true; - return true; + if (intel_phy_is_tc(i915, phy)) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Forcing full modeset to compute TC port DPLLs\n", + encoder->base.base.id, encoder->base.name); + crtc_state->uapi.mode_changed = true; + fastset = false; + } + + if (intel_crtc_has_dp_encoder(crtc_state) && + !intel_dp_initial_fastset_check(encoder, crtc_state)) + fastset = false; + + return fastset; } static enum intel_output_type @@ -4028,7 +4029,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder, static bool lpt_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, SDEISR) & bit; } @@ -4036,7 +4037,7 @@ static bool lpt_digital_port_connected(struct intel_encoder *encoder) static bool hsw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, DEISR) & bit; } @@ -4044,7 +4045,7 @@ static bool hsw_digital_port_connected(struct intel_encoder *encoder) static bool bdw_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin]; + u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin]; return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f4f7c3414762..dd008ba8afe3 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -91,6 +91,7 @@ #include "intel_dmc.h" #include "intel_dp_link_training.h" #include "intel_dpt.h" +#include "intel_dsb.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fdi.h" @@ -117,6 +118,7 @@ #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "skl_watermark.h" #include "vlv_dsi.h" #include "vlv_dsi_pll.h" #include "vlv_dsi_regs.h" @@ -163,16 +165,16 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); */ void intel_update_watermarks(struct drm_i915_private *dev_priv) { - if (dev_priv->wm_disp->update_wm) - dev_priv->wm_disp->update_wm(dev_priv); + if (dev_priv->display.funcs.wm->update_wm) + dev_priv->display.funcs.wm->update_wm(dev_priv); } static int intel_compute_pipe_wm(struct intel_atomic_state 
*state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->compute_pipe_wm) - return dev_priv->wm_disp->compute_pipe_wm(state, crtc); + if (dev_priv->display.funcs.wm->compute_pipe_wm) + return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc); return 0; } @@ -180,20 +182,20 @@ static int intel_compute_intermediate_wm(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (!dev_priv->wm_disp->compute_intermediate_wm) + if (!dev_priv->display.funcs.wm->compute_intermediate_wm) return 0; if (drm_WARN_ON(&dev_priv->drm, - !dev_priv->wm_disp->compute_pipe_wm)) + !dev_priv->display.funcs.wm->compute_pipe_wm)) return 0; - return dev_priv->wm_disp->compute_intermediate_wm(state, crtc); + return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc); } static bool intel_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->initial_watermarks) { - dev_priv->wm_disp->initial_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->initial_watermarks) { + dev_priv->display.funcs.wm->initial_watermarks(state, crtc); return true; } return false; @@ -203,23 +205,23 @@ static void intel_atomic_update_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->atomic_update_watermarks) - dev_priv->wm_disp->atomic_update_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->atomic_update_watermarks) + dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc); } static void intel_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->optimize_watermarks) - dev_priv->wm_disp->optimize_watermarks(state, crtc); + if (dev_priv->display.funcs.wm->optimize_watermarks) + dev_priv->display.funcs.wm->optimize_watermarks(state, crtc); } static int intel_compute_global_watermarks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->wm_disp->compute_global_watermarks) - return dev_priv->wm_disp->compute_global_watermarks(state); + if (dev_priv->display.funcs.wm->compute_global_watermarks) + return dev_priv->display.funcs.wm->compute_global_watermarks(state); return 0; } @@ -618,7 +620,10 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) if (!IS_I830(dev_priv)) val &= ~PIPECONF_ENABLE; - if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 14) + intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), + FECSTALL_DIS_DPTSTREAM_DPTTG, 0); + else if (DISPLAY_VER(dev_priv) >= 12) intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), FECSTALL_DIS_DPTSTREAM_DPTTG, 0); @@ -1486,7 +1491,7 @@ static void intel_encoders_update_prepare(struct intel_atomic_state *state) * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
*/ - if (i915->dpll.mgr) { + if (i915->display.dpll.mgr) { for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (intel_crtc_needs_modeset(new_crtc_state)) continue; @@ -1838,7 +1843,9 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); + enum transcoder transcoder = crtc_state->cpu_transcoder; + i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : + CHICKEN_TRANS(transcoder); u32 val; val = intel_de_read(dev_priv, reg); @@ -2080,22 +2087,20 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) return false; - else if (IS_DG2(dev_priv)) - /* - * DG2 outputs labelled as "combo PHY" in the bspec use - * SNPS PHYs with completely different programming, - * hence we always return false here. - */ - return false; else if (IS_ALDERLAKE_S(dev_priv)) return phy <= PHY_E; else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) return phy <= PHY_D; else if (IS_JSL_EHL(dev_priv)) return phy <= PHY_C; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) return phy <= PHY_B; else + /* + * DG2 outputs labelled as "combo PHY" in the bspec use + * SNPS PHYs with completely different programming, + * hence we always return false here. + */ return false; } @@ -2401,7 +2406,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) != 2) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - if (!dev_priv->wm_disp->initial_watermarks) + if (!dev_priv->display.funcs.wm->initial_watermarks) intel_update_watermarks(dev_priv); /* clock the pipe down to 640x480@60 to potentially save power */ @@ -2660,7 +2665,7 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state) intel_mode_from_crtc_timings(pipe_mode, pipe_mode); if (DISPLAY_VER(i915) < 4) { - clock_limit = i915->max_cdclk_freq * 9 / 10; + clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -2692,6 +2697,10 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); int ret; + ret = intel_dpll_crtc_compute_clock(state, crtc); + if (ret) + return ret; + ret = intel_crtc_compute_pipe_src(crtc_state); if (ret) return ret; @@ -2718,19 +2727,11 @@ intel_reduce_m_n_ratio(u32 *num, u32 *den) } } -static void compute_m_n(unsigned int m, unsigned int n, - u32 *ret_m, u32 *ret_n, - bool constant_n) +static void compute_m_n(u32 *ret_m, u32 *ret_n, + u32 m, u32 n, u32 constant_n) { - /* - * Several DP dongles in particular seem to be fussy about - * too large link M/N values. Give N value as 0x8000 that - * should be acceptable by specific devices. 0x8000 is the - * specified fixed N value for asynchronous clock mode, - * which the devices expect also in synchronous clock mode. 
- */ if (constant_n) - *ret_n = DP_LINK_CONSTANT_N_VALUE; + *ret_n = constant_n; else *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); @@ -2742,22 +2743,28 @@ void intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n, bool fec_enable) + bool fec_enable) { u32 data_clock = bits_per_pixel * pixel_clock; if (fec_enable) data_clock = intel_dp_mode_to_fec_clock(data_clock); + /* + * Windows/BIOS uses fixed M/N values always. Follow suit. + * + * Also several DP dongles in particular seem to be fussy + * about too large link M/N values. Presumably the 20bit + * value used by Windows/BIOS is acceptable to everyone. + */ m_n->tu = 64; - compute_m_n(data_clock, - link_clock * nlanes * 8, - &m_n->data_m, &m_n->data_n, - constant_n); + compute_m_n(&m_n->data_m, &m_n->data_n, + data_clock, link_clock * nlanes * 8, + 0x8000000); - compute_m_n(pixel_clock, link_clock, - &m_n->link_m, &m_n->link_n, - constant_n); + compute_m_n(&m_n->link_m, &m_n->link_n, + pixel_clock, link_clock, + 0x80000); } static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) @@ -2773,12 +2780,12 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) PCH_DREF_CONTROL) & DREF_SSC1_ENABLE; - if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) { drm_dbg_kms(&dev_priv->drm, "SSC %s by BIOS, overriding VBT which says %s\n", str_enabled_disabled(bios_lvds_use_ssc), - str_enabled_disabled(dev_priv->vbt.lvds_use_ssc)); - dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc)); + dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc; } } } @@ -4126,7 +4133,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { - tmp = intel_de_read(dev_priv, CHICKEN_TRANS(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? 
+ MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : + CHICKEN_TRANS(pipe_config->cpu_transcoder)); pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; } else { @@ -4145,7 +4154,7 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!i915->display->get_pipe_config(crtc, crtc_state)) + if (!i915->display.funcs.display->get_pipe_config(crtc, crtc_state)) return false; crtc_state->hw.active = true; @@ -4374,7 +4383,7 @@ static int i9xx_pll_refclk(struct drm_device *dev, u32 dpll = pipe_config->dpll_hw_state.dpll; if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) - return dev_priv->vbt.lvds_ssc_freq; + return dev_priv->display.vbt.lvds_ssc_freq; else if (HAS_PCH_SPLIT(dev_priv)) return 120000; else if (DISPLAY_VER(dev_priv) != 2) @@ -4492,7 +4501,31 @@ int intel_dotclock_calculate(int link_freq, if (!m_n->link_n) return 0; - return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); + return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), + m_n->link_n); +} + +int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) +{ + int dotclock; + + if (intel_crtc_has_dp_encoder(pipe_config)) + dotclock = intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) + dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 24, + pipe_config->pipe_bpp); + else + dotclock = pipe_config->port_clock; + + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + !intel_crtc_has_dp_encoder(pipe_config)) + dotclock *= 2; + + if (pipe_config->pixel_multiplier) + dotclock /= pipe_config->pixel_multiplier; + + return dotclock; } /* Returns the currently programmed mode of the given encoder. 
*/ @@ -4753,7 +4786,7 @@ static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) /* Display WA #1135: BXT:ALL GLK:ALL */ if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && - dev_priv->ipc_enabled) + skl_watermark_ipc_enabled(dev_priv)) linetime_wm /= 2; return min(linetime_wm, 0x1ff); @@ -4799,10 +4832,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, crtc_state->update_wm_post = true; if (mode_changed) { - ret = intel_dpll_crtc_compute_clock(state, crtc); - if (ret) - return ret; - ret = intel_dpll_crtc_get_shared_dpll(state, crtc); if (ret) return ret; @@ -5367,46 +5396,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2) } static bool -intel_compare_m_n(unsigned int m, unsigned int n, - unsigned int m2, unsigned int n2, - bool exact) -{ - if (m == m2 && n == n2) - return true; - - if (exact || !m || !n || !m2 || !n2) - return false; - - BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); - - if (n > n2) { - while (n > n2) { - m2 <<= 1; - n2 <<= 1; - } - } else if (n < n2) { - while (n < n2) { - m <<= 1; - n <<= 1; - } - } - - if (n != n2) - return false; - - return intel_fuzzy_clock_check(m, m2); -} - -static bool intel_compare_link_m_n(const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2, - bool exact) + const struct intel_link_m_n *m2_n2) { return m_n->tu == m2_n2->tu && - intel_compare_m_n(m_n->data_m, m_n->data_n, - m2_n2->data_m, m2_n2->data_n, exact) && - intel_compare_m_n(m_n->link_m, m_n->link_n, - m2_n2->link_m, m2_n2->link_n, exact); + m_n->data_m == m2_n2->data_m && + m_n->data_n == m2_n2->data_n && + m_n->link_m == m2_n2->link_m && + m_n->link_n == m2_n2->link_n; } static bool @@ -5600,8 +5597,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_M_N(name) do { \ if (!intel_compare_link_m_n(&current_config->name, \ - &pipe_config->name,\ + &pipe_config->name)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "found tu %i, data %i/%i link %i/%i)", \ @@ -5648,9 +5644,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, */ #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \ if (!intel_compare_link_m_n(&current_config->name, \ - &pipe_config->name, !fastset) && \ + &pipe_config->name) && \ !intel_compare_link_m_n(&current_config->alt_name, \ - &pipe_config->name, !fastset)) { \ + &pipe_config->name)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i data %i/%i link %i/%i, " \ "or tu %i data %i/%i link %i/%i, " \ @@ -5685,16 +5681,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) -#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ - if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ - pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected %i, found %i)", \ - current_config->name, \ - pipe_config->name); \ - ret = false; \ - } \ -} while (0) - #define PIPE_CONF_CHECK_INFOFRAME(name) do { \ if (!intel_compare_infoframe(&current_config->infoframes.name, \ &pipe_config->infoframes.name)) { \ @@ -5750,8 +5736,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { - PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); + if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) { + if (!fastset || !pipe_config->seamless_m_n) + PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); } 
else { PIPE_CONF_CHECK_M_N(dp_m_n); PIPE_CONF_CHECK_M_N(dp_m2_n2); @@ -5813,7 +5800,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_RECT(pch_pfit.dst); PIPE_CONF_CHECK_I(scaler_state.scaler_id); - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + PIPE_CONF_CHECK_I(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -5840,7 +5827,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->dpll.mgr) { + if (dev_priv->display.dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); @@ -5883,9 +5870,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + if (!fastset || !pipe_config->seamless_m_n) { + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock); + } + PIPE_CONF_CHECK_I(port_clock); PIPE_CONF_CHECK_I(min_voltage_level); @@ -5927,7 +5916,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS -#undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_CHECK_COLOR_LUT #undef PIPE_CONF_CHECK_TIMINGS #undef PIPE_CONF_CHECK_RECT @@ -6049,20 +6037,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) } } -static void intel_modeset_clear_plls(struct intel_atomic_state *state) -{ - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state)) - continue; - - intel_release_shared_dplls(state, crtc); - } -} - /* * This implements the workaround described in the "notes" section of the mode * set sequence documentation. When going from no pipes or single pipe to @@ -6163,23 +6137,6 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->update_pipe = true; } -static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - /* - * If we're not doing the full modeset we want to - * keep the current M/N values as they may be - * sufficiently different to the computed values - * to cause problems. 
- * - * FIXME: should really copy more fuzzy state here - */ - new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n; - new_crtc_state->dp_m_n = old_crtc_state->dp_m_n; - new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2; - new_crtc_state->has_drrs = old_crtc_state->has_drrs; -} - static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, struct intel_crtc *crtc, u8 plane_ids_mask) @@ -6836,9 +6793,11 @@ static int intel_atomic_check(struct drm_device *dev, if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - ret = intel_modeset_pipe_config_late(state, crtc); - if (ret) - goto fail; + if (new_crtc_state->hw.enable) { + ret = intel_modeset_pipe_config_late(state, crtc); + if (ret) + goto fail; + } intel_crtc_check_fastset(old_crtc_state, new_crtc_state); } @@ -6889,15 +6848,12 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (intel_crtc_needs_modeset(new_crtc_state)) { - any_ms = true; + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; - } - if (!new_crtc_state->update_pipe) - continue; + any_ms = true; - intel_crtc_copy_fastset(old_crtc_state, new_crtc_state); + intel_release_shared_dplls(state, crtc); } if (any_ms && !check_digital_port_conflicts(state)) { @@ -6938,8 +6894,6 @@ static int intel_atomic_check(struct drm_device *dev, ret = intel_modeset_calc_cdclk(state); if (ret) return ret; - - intel_modeset_clear_plls(state); } ret = intel_atomic_check_crtcs(state); @@ -7058,6 +7012,10 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) hsw_set_linetime_wm(new_crtc_state); + + if (new_crtc_state->seamless_m_n) + intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder, + &new_crtc_state->dp_m_n); } static void commit_pipe_pre_planes(struct intel_atomic_state *state, @@ -7120,7 +7078,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display->crtc_enable(state, crtc); + dev_priv->display.funcs.display->crtc_enable(state, crtc); if (intel_crtc_is_bigjoiner_slave(new_crtc_state)) return; @@ -7199,7 +7157,7 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, */ intel_crtc_disable_pipe_crc(crtc); - dev_priv->display->crtc_disable(state, crtc); + dev_priv->display.funcs.display->crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); intel_disable_shared_dpll(old_crtc_state); @@ -7410,7 +7368,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) struct intel_atomic_state *state, *next; struct llist_node *freed; - freed = llist_del_all(&dev_priv->atomic_helper.free_list); + freed = llist_del_all(&dev_priv->display.atomic_helper.free_list); llist_for_each_entry_safe(state, next, freed, freed) drm_atomic_state_put(&state->base); } @@ -7418,7 +7376,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) static void intel_atomic_helper_free_state_worker(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), atomic_helper.free_work); + container_of(work, typeof(*dev_priv), display.atomic_helper.free_work); intel_atomic_helper_free_state(dev_priv); } @@ -7588,7 +7546,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ - dev_priv->display->commit_modeset_enables(state); + dev_priv->display.funcs.display->commit_modeset_enables(state); intel_encoders_update_complete(state); @@ -7711,7 +7669,7 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, case FENCE_FREE: { struct intel_atomic_helper *helper = - &to_i915(state->base.dev)->atomic_helper; + &to_i915(state->base.dev)->display.atomic_helper; if (llist_add(&state->freed, &helper->free_list)) schedule_work(&helper->free_work); @@ -7814,12 +7772,12 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&state->commit_ready); if (nonblock && state->modeset) { - queue_work(dev_priv->modeset_wq, &state->base.commit_work); + queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); } else if (nonblock) { - queue_work(dev_priv->flip_wq, &state->base.commit_work); + queue_work(dev_priv->display.wq.flip, &state->base.commit_work); } else { if (state->modeset) - flush_workqueue(dev_priv->modeset_wq); + flush_workqueue(dev_priv->display.wq.modeset); intel_atomic_commit_tail(state); } @@ -7925,7 +7883,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; - if (!dev_priv->vbt.int_crt_support) + if (!dev_priv->display.vbt.int_crt_support) return false; return true; @@ -8060,7 +8018,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; - if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support) + if (IS_VALLEYVIEW(dev_priv) && dev_priv->display.vbt.int_crt_support) intel_crt_init(dev_priv); /* @@ -8319,7 +8277,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .atomic_state_free = intel_atomic_state_free, }; -static const struct drm_i915_display_funcs skl_display_funcs = { +static const struct intel_display_funcs skl_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, @@ -8327,7 +8285,7 @@ static const struct drm_i915_display_funcs skl_display_funcs = { .get_initial_plane_config = skl_get_initial_plane_config, }; -static const struct drm_i915_display_funcs ddi_display_funcs = { +static const struct intel_display_funcs ddi_display_funcs = { .get_pipe_config = hsw_get_pipe_config, .crtc_enable = hsw_crtc_enable, .crtc_disable = hsw_crtc_disable, @@ -8335,7 +8293,7 @@ static const struct drm_i915_display_funcs ddi_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs pch_split_display_funcs = { +static const struct intel_display_funcs pch_split_display_funcs = { .get_pipe_config = ilk_get_pipe_config, .crtc_enable = ilk_crtc_enable, .crtc_disable = ilk_crtc_disable, @@ -8343,7 +8301,7 @@ static const struct drm_i915_display_funcs pch_split_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs vlv_display_funcs = { +static const struct intel_display_funcs vlv_display_funcs = { .get_pipe_config = i9xx_get_pipe_config, .crtc_enable = valleyview_crtc_enable, .crtc_disable = i9xx_crtc_disable, @@ -8351,7 +8309,7 @@ static const struct drm_i915_display_funcs vlv_display_funcs = { .get_initial_plane_config = i9xx_get_initial_plane_config, }; -static const struct drm_i915_display_funcs i9xx_display_funcs = { +static const struct intel_display_funcs i9xx_display_funcs = { .get_pipe_config = 
i9xx_get_pipe_config, .crtc_enable = i9xx_crtc_enable, .crtc_disable = i9xx_crtc_disable, @@ -8374,16 +8332,16 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) intel_dpll_init_clock_hook(dev_priv); if (DISPLAY_VER(dev_priv) >= 9) { - dev_priv->display = &skl_display_funcs; + dev_priv->display.funcs.display = &skl_display_funcs; } else if (HAS_DDI(dev_priv)) { - dev_priv->display = &ddi_display_funcs; + dev_priv->display.funcs.display = &ddi_display_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display = &pch_split_display_funcs; + dev_priv->display.funcs.display = &pch_split_display_funcs; } else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) { - dev_priv->display = &vlv_display_funcs; + dev_priv->display.funcs.display = &vlv_display_funcs; } else { - dev_priv->display = &i9xx_display_funcs; + dev_priv->display.funcs.display = &i9xx_display_funcs; } intel_fdi_init_hook(dev_priv); @@ -8396,11 +8354,11 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); + cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state); intel_update_cdclk(i915); - intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; + intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK"); + cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) @@ -8456,7 +8414,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv) int i; /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->wm_disp->optimize_watermarks) + if (!dev_priv->display.funcs.wm->optimize_watermarks) return; state = drm_atomic_state_alloc(&dev_priv->drm); @@ -8688,11 +8646,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) intel_dmc_ucode_init(i915); - i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); - i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | - WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); - - i915->window2_delay = 0; /* No DSB so no window2 delay */ + i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); + i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); intel_mode_config_init(i915); @@ -8708,8 +8664,8 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) if (ret) goto cleanup_vga_client_pw_domain_dmc; - init_llist_head(&i915->atomic_helper.free_list); - INIT_WORK(&i915->atomic_helper.free_work, + init_llist_head(&i915->display.atomic_helper.free_list); + INIT_WORK(&i915->display.atomic_helper.free_work, intel_atomic_helper_free_state_worker); intel_init_quirks(i915); @@ -8769,7 +8725,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) intel_hdcp_component_init(i915); - if (i915->max_cdclk_freq == 0) + if (i915->display.cdclk.max_cdclk_freq == 0) intel_update_max_cdclk(i915); /* @@ -8833,7 +8789,7 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_hpd_init(i915); intel_hpd_poll_disable(i915); - intel_init_ipc(i915); + skl_watermark_ipc_init(i915); return 0; } @@ -8964,7 +8920,7 @@ void intel_display_resume(struct drm_device *dev) if (!ret) ret = __intel_display_resume(i915, state, &ctx); - intel_enable_ipc(i915); + skl_watermark_ipc_update(i915); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); @@ -8999,11 +8955,18 @@ void 
intel_modeset_driver_remove(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return; - flush_workqueue(i915->flip_wq); - flush_workqueue(i915->modeset_wq); + flush_workqueue(i915->display.wq.flip); + flush_workqueue(i915->display.wq.modeset); + + flush_work(&i915->display.atomic_helper.free_work); + drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list)); - flush_work(&i915->atomic_helper.free_work); - drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list)); + /* + * MST topology needs to be suspended so we don't have any calls to + * fbdev after it's finalized. MST will be destroyed later as part of + * drm_mode_config_cleanup() + */ + intel_dp_mst_suspend(i915); } /* part #2: call after irq uninstall */ @@ -9018,13 +8981,6 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) */ intel_hpd_poll_fini(i915); - /* - * MST topology needs to be suspended so we don't have any calls to - * fbdev after it's finalized. MST will be destroyed later as part of - * drm_mode_config_cleanup() - */ - intel_dp_mst_suspend(i915); - /* poll work can call into fbdev, hence clean that up afterwards */ intel_fbdev_fini(i915); @@ -9041,8 +8997,8 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) intel_gmbus_teardown(i915); - destroy_workqueue(i915->flip_wq); - destroy_workqueue(i915->modeset_wq); + destroy_workqueue(i915->display.wq.flip); + destroy_workqueue(i915->display.wq.modeset); intel_fbc_cleanup(i915); } @@ -9089,7 +9045,7 @@ void intel_display_driver_register(struct drm_i915_private *i915) /* Must be done after probing outputs */ intel_opregion_register(i915); - acpi_video_register(); + intel_acpi_video_register(i915); intel_audio_init(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index cef251025d7a..884e8e67b17c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -375,7 +375,7 @@ enum hpd_pin { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p)) + for_each_if(RUNTIME_INFO(__dev_priv)->pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ for_each_pipe(__dev_priv, __p) \ @@ -383,7 +383,7 @@ enum hpd_pin { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t)) + for_each_if (RUNTIME_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -547,7 +547,7 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state, void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n, bool fec_enable); + bool fec_enable); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h new file mode 100644 index 000000000000..96cf994b0ad1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_CORE_H__ +#define __INTEL_DISPLAY_CORE_H__ + +#include <linux/list.h> +#include 
<linux/llist.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <drm/drm_connector.h> + +#include "intel_cdclk.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_dmc.h" +#include "intel_dpll_mgr.h" +#include "intel_fbc.h" +#include "intel_global_state.h" +#include "intel_gmbus.h" +#include "intel_opregion.h" +#include "intel_pm_types.h" + +struct drm_i915_private; +struct drm_property; +struct i915_audio_component; +struct i915_hdcp_comp_master; +struct intel_atomic_state; +struct intel_audio_funcs; +struct intel_bios_encoder_data; +struct intel_cdclk_funcs; +struct intel_cdclk_vals; +struct intel_color_funcs; +struct intel_crtc; +struct intel_crtc_state; +struct intel_dpll_funcs; +struct intel_dpll_mgr; +struct intel_fbdev; +struct intel_fdi_funcs; +struct intel_hotplug_funcs; +struct intel_initial_plane_config; +struct intel_overlay; + +/* Amount of SAGV/QGV points, BSpec precisely defines this */ +#define I915_NUM_QGV_POINTS 8 + +/* Amount of PSF GV points, BSpec precisely defines this */ +#define I915_NUM_PSF_GV_POINTS 3 + +struct intel_display_funcs { + /* + * Returns the active state of the crtc, and if the crtc is active, + * fills out the pipe-config with the hw state. + */ + bool (*get_pipe_config)(struct intel_crtc *, + struct intel_crtc_state *); + void (*get_initial_plane_config)(struct intel_crtc *, + struct intel_initial_plane_config *); + void (*crtc_enable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*crtc_disable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*commit_modeset_enables)(struct intel_atomic_state *state); +}; + +/* functions used for watermark calcs for display. */ +struct intel_wm_funcs { + /* update_wm is for legacy wm management */ + void (*update_wm)(struct drm_i915_private *dev_priv); + int (*compute_pipe_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*compute_intermediate_wm)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*initial_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*atomic_update_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*optimize_watermarks)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + int (*compute_global_watermarks)(struct intel_atomic_state *state); +}; + +struct intel_audio { + /* hda/i915 audio component */ + struct i915_audio_component *component; + bool component_registered; + /* mutex for audio/video sync */ + struct mutex mutex; + int power_refcount; + u32 freq_cntrl; + + /* Used to save the pipe-to-encoder mapping for audio */ + struct intel_encoder *encoder_map[I915_MAX_PIPES]; + + /* necessary resource sharing with HDMI LPE audio driver. */ + struct { + struct platform_device *platdev; + int irq; + } lpe; +}; + +/* + * dpll and cdclk state is protected by connection_mutex. dpll.lock serializes + * intel_{prepare,enable,disable}_shared_dpll. Must be global rather than per + * dpll, because on some platforms plls share registers. + */ +struct intel_dpll { + struct mutex lock; + + int num_shared_dpll; + struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + const struct intel_dpll_mgr *mgr; + + struct { + int nssc; + int ssc; + } ref_clks; +}; + +struct intel_frontbuffer_tracking { + spinlock_t lock; + + /* + * Tracking bits for delayed frontbuffer flushing due to gpu activity or + * scheduled flips. 
+ */ + unsigned busy_bits; + unsigned flip_bits; +}; + +struct intel_hotplug { + struct delayed_work hotplug_work; + + const u32 *hpd, *pch_hpd; + + struct { + unsigned long last_jiffies; + int count; + enum { + HPD_ENABLED = 0, + HPD_DISABLED = 1, + HPD_MARK_DISABLED = 2 + } state; + } stats[HPD_NUM_PINS]; + u32 event_bits; + u32 retry_bits; + struct delayed_work reenable_work; + + u32 long_port_mask; + u32 short_port_mask; + struct work_struct dig_port_work; + + struct work_struct poll_init_work; + bool poll_enabled; + + unsigned int hpd_storm_threshold; + /* Whether or not to count short HPD IRQs in HPD storms */ + u8 hpd_short_storm_enabled; + + /* + * If we get a HPD irq from DP and a HPD irq from non-DP, the + * non-DP HPD work could block the workqueue while waiting to + * acquire the mode config mutex, which userspace may have taken. + * However, userspace is waiting on the DP workqueue to run, which + * is blocked behind the non-DP one. + */ + struct workqueue_struct *dp_wq; +}; + +struct intel_vbt_data { + /* bdb version */ + u16 version; + + /* Feature bits */ + unsigned int int_tv_support:1; + unsigned int int_crt_support:1; + unsigned int lvds_use_ssc:1; + unsigned int int_lvds_support:1; + unsigned int display_clock_mode:1; + unsigned int fdi_rx_polarity_inverted:1; + int lvds_ssc_freq; + enum drm_panel_orientation orientation; + + bool override_afc_startup; + u8 override_afc_startup_val; + + int crt_ddc_pin; + + struct list_head display_devices; + struct list_head bdb_blocks; + + struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ + struct sdvo_device_mapping { + u8 initialized; + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 i2c_pin; + u8 ddc_pin; + } sdvo_mappings[2]; +}; + +struct intel_wm { + /* + * Raw watermark latency values: + * in 0.1us units for WM0, + * in 0.5us units for WM1+. + */ + /* primary */ + u16 pri_latency[5]; + /* sprite */ + u16 spr_latency[5]; + /* cursor */ + u16 cur_latency[5]; + /* + * Raw watermark memory latency values + * for SKL for all 8 levels + * in 1us units. + */ + u16 skl_latency[8]; + + /* current hardware state */ + union { + struct ilk_wm_values hw; + struct vlv_wm_values vlv; + struct g4x_wm_values g4x; + }; + + u8 max_level; + + /* + * Should be held around atomic WM register writing; also + * protects intel_crtc->wm.active and + * crtc_state->wm.need_postvbl_update. + */ + struct mutex wm_mutex; + + bool ipc_enabled; +}; + +struct intel_display { + /* Display functions */ + struct { + /* Top level crtc-ish functions */ + const struct intel_display_funcs *display; + + /* Display CDCLK functions */ + const struct intel_cdclk_funcs *cdclk; + + /* Display pll funcs */ + const struct intel_dpll_funcs *dpll; + + /* irq display functions */ + const struct intel_hotplug_funcs *hotplug; + + /* pm display functions */ + const struct intel_wm_funcs *wm; + + /* fdi display functions */ + const struct intel_fdi_funcs *fdi; + + /* Display internal color functions */ + const struct intel_color_funcs *color; + + /* Display internal audio functions */ + const struct intel_audio_funcs *audio; + } funcs; + + /* Grouping using anonymous structs. Keep sorted. 
*/ + struct intel_atomic_helper { + struct llist_head free_list; + struct work_struct free_work; + } atomic_helper; + + struct { + /* backlight registers and fields in struct intel_panel */ + struct mutex lock; + } backlight; + + struct { + struct intel_global_obj obj; + + struct intel_bw_info { + /* for each QGV point */ + unsigned int deratedbw[I915_NUM_QGV_POINTS]; + /* for each PSF GV point */ + unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; + u8 num_qgv_points; + u8 num_psf_gv_points; + u8 num_planes; + } max[6]; + } bw; + + struct { + /* The current hardware cdclk configuration */ + struct intel_cdclk_config hw; + + /* cdclk, divider, and ratio table from bspec */ + const struct intel_cdclk_vals *table; + + struct intel_global_obj obj; + + unsigned int max_cdclk_freq; + } cdclk; + + struct { + /* The current hardware dbuf configuration */ + u8 enabled_slices; + + struct intel_global_obj obj; + } dbuf; + + struct { + /* VLV/CHV/BXT/GLK DSI MMIO register base address */ + u32 mmio_base; + } dsi; + + struct { + /* list of fbdev register on this device */ + struct intel_fbdev *fbdev; + struct work_struct suspend_work; + } fbdev; + + struct { + unsigned int pll_freq; + u32 rx_config; + } fdi; + + struct { + /* + * Base address of where the gmbus and gpio blocks are located + * (either on PCH or on SoC for platforms without PCH). + */ + u32 mmio_base; + + /* + * gmbus.mutex protects against concurrent usage of the single + * hw gmbus controller on different i2c buses. + */ + struct mutex mutex; + + struct intel_gmbus *bus[GMBUS_NUM_PINS]; + + wait_queue_head_t wait_queue; + } gmbus; + + struct { + struct i915_hdcp_comp_master *master; + bool comp_added; + + /* Mutex to protect the above hdcp component related values. */ + struct mutex comp_mutex; + } hdcp; + + struct { + struct i915_power_domains domains; + + /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ + u32 chv_phy_control; + + /* perform PHY state sanity checks? */ + bool chv_phy_assert[2]; + } power; + + struct { + u32 mmio_base; + + /* protects panel power sequencer state */ + struct mutex mutex; + } pps; + + struct { + struct drm_property *broadcast_rgb; + struct drm_property *force_audio; + } properties; + + struct { + unsigned long mask; + } quirks; + + struct { + enum { + I915_SAGV_UNKNOWN = 0, + I915_SAGV_DISABLED, + I915_SAGV_ENABLED, + I915_SAGV_NOT_CONTROLLED + } status; + + u32 block_time_us; + } sagv; + + struct { + /* ordered wq for modesets */ + struct workqueue_struct *modeset; + + /* unbound hipri wq for page flips/plane updates */ + struct workqueue_struct *flip; + } wq; + + /* Grouping using named structs. Keep sorted. 
*/ + struct intel_audio audio; + struct intel_dmc dmc; + struct intel_dpll dpll; + struct intel_fbc *fbc[I915_MAX_FBCS]; + struct intel_frontbuffer_tracking fb_tracking; + struct intel_hotplug hotplug; + struct intel_opregion opregion; + struct intel_overlay *overlay; + struct intel_vbt_data vbt; + struct intel_wm wm; +}; + +#endif /* __INTEL_DISPLAY_CORE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 6c3954479047..7c7253a2541c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -26,6 +26,7 @@ #include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" +#include "skl_watermark.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -37,10 +38,10 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); seq_printf(m, "FB tracking busy bits: 0x%08x\n", - dev_priv->fb_tracking.busy_bits); + dev_priv->display.fb_tracking.busy_bits); seq_printf(m, "FB tracking flip bits: 0x%08x\n", - dev_priv->fb_tracking.flip_bits); + dev_priv->display.fb_tracking.flip_bits); return 0; } @@ -103,7 +104,8 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_opregion(struct seq_file *m, void *unused) { - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_opregion *opregion = &i915->display.opregion; if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); @@ -113,7 +115,8 @@ static int i915_vbt(struct seq_file *m, void *unused) { - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_opregion *opregion = &i915->display.opregion; if (opregion->vbt) seq_write(m, opregion->vbt, opregion->vbt_size); @@ -129,7 +132,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_framebuffer *drm_fb; #ifdef CONFIG_DRM_FBDEV_EMULATION - fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev); + fbdev_fb = intel_fbdev_framebuffer(dev_priv->display.fbdev.fbdev); if (fbdev_fb) { seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fbdev_fb->base.width, @@ -722,10 +725,11 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) /* Not all platforms have a scaler */ if (num_scalers) { - seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", + seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d scaling_filter=%d", num_scalers, crtc_state->scaler_state.scaler_users, - crtc_state->scaler_state.scaler_id); + crtc_state->scaler_state.scaler_id, + crtc_state->hw.scaling_filter); for (i = 0; i < num_scalers; i++) { const struct intel_scaler *sc = @@ -932,11 +936,11 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) drm_modeset_lock_all(dev); seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", - dev_priv->dpll.ref_clks.nssc, - dev_priv->dpll.ref_clks.ssc); + dev_priv->display.dpll.ref_clks.nssc, + dev_priv->display.dpll.ref_clks.ssc); - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll 
= &dev_priv->display.dpll.shared_dplls[i]; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, pll->info->id); @@ -979,58 +983,6 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) return 0; } -static int i915_ipc_status_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - - seq_printf(m, "Isochronous Priority Control: %s\n", - str_yes_no(dev_priv->ipc_enabled)); - return 0; -} - -static int i915_ipc_status_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (!HAS_IPC(dev_priv)) - return -ENODEV; - - return single_open(file, i915_ipc_status_show, dev_priv); -} - -static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - intel_wakeref_t wakeref; - bool enable; - int ret; - - ret = kstrtobool_from_user(ubuf, len, &enable); - if (ret < 0) - return ret; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - if (!dev_priv->ipc_enabled && enable) - drm_info(&dev_priv->drm, - "Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - } - - return len; -} - -static const struct file_operations i915_ipc_status_fops = { - .owner = THIS_MODULE, - .open = i915_ipc_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_ipc_status_write -}; - static int i915_ddb_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1427,9 +1379,9 @@ static int pri_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.pri_latency; + latencies = dev_priv->display.wm.pri_latency; wm_latency_show(m, latencies); @@ -1442,9 +1394,9 @@ static int spr_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.spr_latency; + latencies = dev_priv->display.wm.spr_latency; wm_latency_show(m, latencies); @@ -1457,9 +1409,9 @@ static int cur_wm_latency_show(struct seq_file *m, void *data) const u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.cur_latency; + latencies = dev_priv->display.wm.cur_latency; wm_latency_show(m, latencies); @@ -1550,9 +1502,9 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.pri_latency; + latencies = dev_priv->display.wm.pri_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1565,9 +1517,9 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.spr_latency; + latencies = dev_priv->display.wm.spr_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1580,9 +1532,9 @@ static ssize_t 
cur_wm_latency_write(struct file *file, const char __user *ubuf, u16 *latencies; if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; + latencies = dev_priv->display.wm.skl_latency; else - latencies = dev_priv->wm.cur_latency; + latencies = dev_priv->display.wm.cur_latency; return wm_latency_write(file, ubuf, len, offp, latencies); } @@ -1617,14 +1569,14 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ intel_synchronize_irq(dev_priv); - flush_work(&dev_priv->hotplug.dig_port_work); - flush_delayed_work(&dev_priv->hotplug.hotplug_work); + flush_work(&dev_priv->display.hotplug.dig_port_work); + flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", @@ -1639,7 +1591,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; unsigned int new_threshold; int i; char *newline; @@ -1678,7 +1630,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); + flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } @@ -1702,7 +1654,7 @@ static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = m->private; seq_printf(m, "Enabled: %s\n", - str_yes_no(dev_priv->hotplug.hpd_short_storm_enabled)); + str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); return 0; } @@ -1720,7 +1672,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; + struct intel_hotplug *hotplug = &dev_priv->display.hotplug; char *newline; char tmp[16]; int i; @@ -1756,7 +1708,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); + flush_delayed_work(&dev_priv->display.hotplug.reenable_work); return len; } @@ -1907,7 +1859,6 @@ static const struct { {"i915_dp_test_active", &i915_displayport_test_active_fops}, {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, - {"i915_ipc_status", &i915_ipc_status_fops}, {"i915_drrs_ctl", &i915_drrs_ctl_fops}, {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, }; @@ -1931,6 +1882,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); + skl_watermark_ipc_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) @@ -2137,7 +2089,7 @@ static const struct file_operations i915_dsc_fec_support_fops = { .write = i915_dsc_fec_support_write }; -static int i915_dsc_bpp_show(struct seq_file 
*m, void *data) +static int i915_dsc_bpc_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_device *dev = connector->dev; @@ -2160,14 +2112,14 @@ static int i915_dsc_bpp_show(struct seq_file *m, void *data) } crtc_state = to_intel_crtc_state(crtc->state); - seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp); + seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component); out: drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } -static ssize_t i915_dsc_bpp_write(struct file *file, +static ssize_t i915_dsc_bpc_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { @@ -2175,33 +2127,32 @@ static ssize_t i915_dsc_bpp_write(struct file *file, ((struct seq_file *)file->private_data)->private; struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - int dsc_bpp = 0; + int dsc_bpc = 0; int ret; - ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp); + ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpc); if (ret < 0) return ret; - intel_dp->force_dsc_bpp = dsc_bpp; + intel_dp->force_dsc_bpc = dsc_bpc; *offp += len; return len; } -static int i915_dsc_bpp_open(struct inode *inode, +static int i915_dsc_bpc_open(struct inode *inode, struct file *file) { - return single_open(file, i915_dsc_bpp_show, - inode->i_private); + return single_open(file, i915_dsc_bpc_show, inode->i_private); } -static const struct file_operations i915_dsc_bpp_fops = { +static const struct file_operations i915_dsc_bpc_fops = { .owner = THIS_MODULE, - .open = i915_dsc_bpp_open, + .open = i915_dsc_bpc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, - .write = i915_dsc_bpp_write + .write = i915_dsc_bpc_write }; /* @@ -2271,8 +2222,8 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) debugfs_create_file("i915_dsc_fec_support", 0644, root, connector, &i915_dsc_fec_support_fops); - debugfs_create_file("i915_dsc_bpp", 0644, root, - connector, &i915_dsc_bpp_fops); + debugfs_create_file("i915_dsc_bpc", 0644, root, + connector, &i915_dsc_bpc_fops); } if (connector->connector_type == DRM_MODE_CONNECTOR_DSI || diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 589af257edeb..1e608b9e5055 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" #include "intel_de.h" @@ -18,8 +19,8 @@ #include "intel_mchbar_regs.h" #include "intel_pch_refclk.h" #include "intel_pcode.h" -#include "intel_pm.h" #include "intel_snps_phy.h" +#include "skl_watermark.h" #include "vlv_sideband.h" #define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ @@ -243,7 +244,7 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, struct i915_power_domains *power_domains; bool ret; - power_domains = &dev_priv->power_domains; + power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); ret = __intel_display_power_is_enabled(dev_priv, domain); @@ -268,7 +269,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, if (target_dc_state != states[i]) continue; - if (dev_priv->dmc.allowed_dc_mask & target_dc_state) + if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state) break; 
target_dc_state = states[i + 1]; @@ -291,7 +292,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, { struct i915_power_well *power_well; bool dc_off_enabled; - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); @@ -301,7 +302,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, state = sanitize_target_dc_state(dev_priv, state); - if (state == dev_priv->dmc.target_dc_state) + if (state == dev_priv->display.dmc.target_dc_state) goto unlock; dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); @@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, if (!dc_off_enabled) intel_power_well_enable(dev_priv, power_well); - dev_priv->dmc.target_dc_state = state; + dev_priv->display.dmc.target_dc_state = state; if (!dc_off_enabled) intel_power_well_disable(dev_priv, power_well); @@ -339,7 +340,7 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); return !drm_WARN_ON(&i915->drm, bitmap_intersects(power_domains->async_put_domains[0].bits, @@ -352,7 +353,7 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); struct intel_power_domain_mask async_put_mask; enum intel_display_power_domain domain; bool err = false; @@ -375,7 +376,7 @@ static void print_power_domains(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); enum intel_display_power_domain domain; drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM)); @@ -390,7 +391,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); drm_dbg(&i915->drm, "async_put_wakeref %u\n", power_domains->async_put_wakeref); @@ -445,7 +446,7 @@ static bool intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_power_domain_mask async_put_mask; bool ret = false; @@ -474,7 +475,7 @@ static void __intel_display_power_get_domain(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; if (intel_display_power_grab_async_put_ref(dev_priv, domain)) @@ -501,7 +502,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv, intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref = 
intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); @@ -527,7 +528,7 @@ intel_wakeref_t intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; intel_wakeref_t wakeref; bool is_enabled; @@ -563,7 +564,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, const char *name = intel_display_power_domain_str(domain); struct intel_power_domain_mask async_put_mask; - power_domains = &dev_priv->power_domains; + power_domains = &dev_priv->display.power.domains; drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], "Use count on domain %s is already zero\n", @@ -583,7 +584,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, static void __intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; mutex_lock(&power_domains->lock); __intel_display_power_put_domain(dev_priv, domain); @@ -596,7 +597,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); power_domains->async_put_wakeref = wakeref; drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, @@ -610,7 +611,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, { struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; @@ -637,8 +638,8 @@ intel_display_power_put_async_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - power_domains.async_put_work.work); - struct i915_power_domains *power_domains = &dev_priv->power_domains; + display.power.domains.async_put_work.work); + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; @@ -698,7 +699,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); @@ -746,7 +747,7 @@ out_verify: */ void intel_display_power_flush_work(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; @@ -779,7 +780,7 @@ out_verify: static void intel_display_power_flush_work_sync(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; 
intel_display_power_flush_work(i915); cancel_delayed_work_sync(&power_domains->async_put_work); @@ -908,7 +909,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, return 0; if (IS_DG2(dev_priv)) - max_dc = 0; + max_dc = 1; else if (IS_DG1(dev_priv)) max_dc = 3; else if (DISPLAY_VER(dev_priv) >= 12) @@ -976,15 +977,15 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, */ int intel_power_domains_init(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, dev_priv->params.disable_power_well); - dev_priv->dmc.allowed_dc_mask = + dev_priv->display.dmc.allowed_dc_mask = get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); - dev_priv->dmc.target_dc_state = + dev_priv->display.dmc.target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); mutex_init(&power_domains->lock); @@ -1003,12 +1004,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) */ void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) { - intel_display_power_map_cleanup(&dev_priv->power_domains); + intel_display_power_map_cleanup(&dev_priv->display.power.domains); } static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); @@ -1037,7 +1038,7 @@ static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u8 slice_mask = INTEL_INFO(dev_priv)->display.dbuf.slice_mask; enum dbuf_slice slice; @@ -1060,14 +1061,14 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, for_each_dbuf_slice(dev_priv, slice) gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); - dev_priv->dbuf.enabled_slices = req_slices; + dev_priv->display.dbuf.enabled_slices = req_slices; mutex_unlock(&power_domains->lock); } static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) { - dev_priv->dbuf.enabled_slices = + dev_priv->display.dbuf.enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); /* @@ -1075,7 +1076,7 @@ static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) * figure out later which slices we have and what we need. 
*/ gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | - dev_priv->dbuf.enabled_slices); + dev_priv->display.dbuf.enabled_slices); } static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) @@ -1101,7 +1102,7 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; u32 mask, val, i; - if (IS_ALDERLAKE_P(dev_priv)) + if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) return; mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | @@ -1309,7 +1310,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); - intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->display.cdclk.hw, "Current CDCLK"); } /* @@ -1381,6 +1382,9 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, reset_bits = RESET_PCH_HANDSHAKE_ENABLE; } + if (DISPLAY_VER(dev_priv) >= 14) + reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; + val = intel_de_read(dev_priv, reg); if (enable) @@ -1394,7 +1398,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1426,13 +1430,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, static void skl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); @@ -1459,7 +1464,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1493,13 +1498,14 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + /* TODO: disable DMC program */ gen9_dbuf_disable(dev_priv); @@ -1601,7 +1607,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; u32 val; @@ -1668,13 +1674,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = 
&dev_priv->display.power.domains; struct i915_power_well *well; if (!HAS_DISPLAY(dev_priv)) return; gen9_disable_dc_states(dev_priv); + intel_dmc_disable_program(dev_priv); /* 1. Disable all display engine functions -> already done */ @@ -1712,7 +1719,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * power well state and lane status to reconstruct the * expected initial value. */ - dev_priv->chv_phy_control = + dev_priv->display.power.chv_phy_control = PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | @@ -1734,27 +1741,27 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); mask = (status & DPLL_PORTC_READY_MASK) >> 4; if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); - dev_priv->chv_phy_assert[DPIO_PHY0] = false; + dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = false; } else { - dev_priv->chv_phy_assert[DPIO_PHY0] = true; + dev_priv->display.power.chv_phy_assert[DPIO_PHY0] = true; } if (intel_power_well_is_enabled(dev_priv, cmn_d)) { @@ -1766,21 +1773,21 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) if (mask == 0xf) mask = 0x0; else - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); - dev_priv->chv_phy_control |= + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); - dev_priv->chv_phy_assert[DPIO_PHY1] = false; + dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = false; } else { - dev_priv->chv_phy_assert[DPIO_PHY1] = true; + dev_priv->display.power.chv_phy_assert[DPIO_PHY1] = true; } drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); /* Defer application of initial phy_control to enabling the powerwell */ } @@ -1864,7 +1871,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); */ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; power_domains->initializing = true; @@ -1905,8 +1912,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) /* Disable power support if the user asked so.
*/ if (!i915->params.disable_power_well) { drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); - i915->power_domains.disable_wakeref = intel_display_power_get(i915, - POWER_DOMAIN_INIT); + i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, + POWER_DOMAIN_INIT); } intel_power_domains_sync_hw(i915); @@ -1927,12 +1934,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.init_wakeref); + fetch_and_zero(&i915->display.power.domains.init_wakeref); /* Remove the refcount we took to keep power well support disabled. */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, - fetch_and_zero(&i915->power_domains.disable_wakeref)); + fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work_sync(i915); @@ -1954,7 +1961,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915) */ void intel_power_domains_sanitize_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; mutex_lock(&power_domains->lock); @@ -1988,7 +1995,7 @@ void intel_power_domains_sanitize_state(struct drm_i915_private *i915) void intel_power_domains_enable(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.init_wakeref); + fetch_and_zero(&i915->display.power.domains.init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); intel_power_domains_verify_state(i915); @@ -2003,7 +2010,7 @@ void intel_power_domains_enable(struct drm_i915_private *i915) */ void intel_power_domains_disable(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; drm_WARN_ON(&i915->drm, power_domains->init_wakeref); power_domains->init_wakeref = @@ -2026,7 +2033,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915) void intel_power_domains_suspend(struct drm_i915_private *i915, enum i915_drm_suspend_mode suspend_mode) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&power_domains->init_wakeref); @@ -2039,7 +2046,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); @@ -2053,7 +2060,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, */ if (!i915->params.disable_power_well) intel_display_power_put(i915, POWER_DOMAIN_INIT, - fetch_and_zero(&i915->power_domains.disable_wakeref)); + fetch_and_zero(&i915->display.power.domains.disable_wakeref)); intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); @@ -2080,7 +2087,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, */ void intel_power_domains_resume(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; if (power_domains->display_core_suspended) { intel_power_domains_init_hw(i915, true); @@ -2098,7 +2105,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915) static void intel_power_domains_dump_info(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; for_each_power_well(i915, power_well) { @@ -2126,7 +2133,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) */ static void intel_power_domains_verify_state(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; struct i915_power_well *power_well; bool dump_domain_info; @@ -2232,10 +2239,10 @@ void intel_display_power_resume(struct drm_i915_private *i915) bxt_disable_dc9(i915); icl_display_core_init(i915, true); if (intel_dmc_has_payload(i915)) { - if (i915->dmc.allowed_dc_mask & + if (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(i915); - else if (i915->dmc.allowed_dc_mask & + else if (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(i915); } @@ -2243,7 +2250,7 @@ void intel_display_power_resume(struct drm_i915_private *i915) bxt_disable_dc9(i915); bxt_display_core_init(i915, true); if (intel_dmc_has_payload(i915) && - (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) + (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) gen9_enable_dc5(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); @@ -2252,7 +2259,7 @@ void intel_display_power_resume(struct drm_i915_private *i915) void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) { - struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_domains *power_domains = &i915->display.power.domains; int i; mutex_lock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 97b367f39f35..dc04afc6cc8f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1350,6 +1350,117 @@ static const struct i915_power_well_desc_list xelpd_power_wells[] = { I915_PW_DESCRIPTORS(xelpd_power_wells_main), }; +/* + * MTL is based on XELPD power domains with the exception of power gating for: + * - DDI_IO (moved to PLL logic) + * - AUX and AUX_IO functionality and register access for USBC1-4 (PICA always-on) 
+ */ +#define XELPDP_PW_2_POWER_DOMAINS \ + XELPD_PW_B_POWER_DOMAINS, \ + XELPD_PW_C_POWER_DOMAINS, \ + XELPD_PW_D_POWER_DOMAINS, \ + POWER_DOMAIN_AUDIO_PLAYBACK, \ + POWER_DOMAIN_VGA, \ + POWER_DOMAIN_PORT_DDI_LANES_TC1, \ + POWER_DOMAIN_PORT_DDI_LANES_TC2, \ + POWER_DOMAIN_PORT_DDI_LANES_TC3, \ + POWER_DOMAIN_PORT_DDI_LANES_TC4 + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2, + XELPDP_PW_2_POWER_DOMAINS, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off, + XELPDP_PW_2_POWER_DOMAINS, + POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_INIT); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1, + POWER_DOMAIN_AUX_USBC1, + POWER_DOMAIN_AUX_TBT1); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc2, + POWER_DOMAIN_AUX_USBC2, + POWER_DOMAIN_AUX_TBT2); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc3, + POWER_DOMAIN_AUX_USBC3, + POWER_DOMAIN_AUX_TBT3); + +I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4, + POWER_DOMAIN_AUX_USBC4, + POWER_DOMAIN_AUX_TBT4); + +static const struct i915_power_well_desc xelpdp_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("DC_off", &xelpdp_pwdoms_dc_off, + .id = SKL_DISP_DC_OFF), + ), + .ops = &gen9_dc_off_power_well_ops, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xelpdp_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xelpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xelpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_D", &xelpd_pwdoms_pw_d, + .hsw.idx = XELPD_PW_CTL_IDX_PW_D), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_D), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), + I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), + I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), + I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3), + I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4), + ), + .ops = &xelpdp_aux_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list xelpdp_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xelpdp_power_wells_main), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1388,7 +1499,7 @@ __set_power_wells(struct i915_power_domains *power_domains, { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); u64 power_well_ids = 0; const struct i915_power_well_desc_list *desc_list; const struct i915_power_well_desc *desc; @@ -1447,7 +1558,7 @@ int intel_display_power_map_init(struct 
i915_power_domains *power_domains) { struct drm_i915_private *i915 = container_of(power_domains, struct drm_i915_private, - power_domains); + display.power.domains); /* * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. @@ -1457,7 +1568,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) return 0; } - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(i915) >= 14) + return set_power_wells(power_domains, xelpdp_power_wells); + else if (DISPLAY_VER(i915) >= 13) return set_power_wells(power_domains, xelpd_power_wells); else if (IS_DG1(i915)) return set_power_wells(power_domains, dg1_power_wells); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 91cfd5890f46..df7ee4969ef1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -5,6 +5,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "intel_backlight_regs.h" #include "intel_combo_phy.h" #include "intel_combo_phy_regs.h" #include "intel_crt.h" @@ -16,10 +17,10 @@ #include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_pcode.h" -#include "intel_pm.h" #include "intel_pps.h" #include "intel_tc.h" #include "intel_vga.h" +#include "skl_watermark.h" #include "vlv_sideband.h" #include "vlv_sideband_reg.h" @@ -84,7 +85,7 @@ lookup_power_well(struct drm_i915_private *i915, drm_WARN(&i915->drm, 1, "Power well %d not defined for this platform\n", power_well_id); - return &i915->power_domains.power_wells[0]; + return &i915->display.power.domains.power_wells[0]; } void intel_power_well_enable(struct drm_i915_private *i915, @@ -710,8 +711,8 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "Resetting DC state tracking from %02x to %02x\n", - dev_priv->dmc.dc_state, val); - dev_priv->dmc.dc_state = val; + dev_priv->display.dmc.dc_state, val); + dev_priv->display.dmc.dc_state = val; } /** @@ -746,8 +747,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->dmc.allowed_dc_mask)) - state &= dev_priv->dmc.allowed_dc_mask; + state & ~dev_priv->display.dmc.allowed_dc_mask)) + state &= dev_priv->display.dmc.allowed_dc_mask; val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); @@ -755,16 +756,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) val & mask, state); /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->dmc.dc_state) + if ((val & mask) != dev_priv->display.dmc.dc_state) drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->dmc.dc_state, val & mask); + dev_priv->display.dmc.dc_state, val & mask); val &= ~mask; val |= state; gen9_write_dc_state(dev_priv, val); - dev_priv->dmc.dc_state = val & mask; + dev_priv->display.dmc.dc_state = val & mask; } static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) @@ -945,7 +946,7 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) { u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); - u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices; + u8 enabled_dbuf_slices = dev_priv->display.dbuf.enabled_slices; drm_WARN(&dev_priv->drm, hw_enabled_dbuf_slices != enabled_dbuf_slices, @@ -958,7 +959,7 @@ void 
gen9_disable_dc_states(struct drm_i915_private *dev_priv) { struct intel_cdclk_config cdclk_config = {}; - if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) { + if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); return; } @@ -971,7 +972,7 @@ void gen9_disable_dc_states(struct drm_i915_private *dev_priv) intel_cdclk_get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ drm_WARN_ON(&dev_priv->drm, - intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, + intel_cdclk_needs_modeset(&dev_priv->display.cdclk.hw, &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); @@ -1000,7 +1001,7 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, if (!intel_dmc_has_payload(dev_priv)) return; - switch (dev_priv->dmc.target_dc_state) { + switch (dev_priv->display.dmc.target_dc_state) { case DC_STATE_EN_DC3CO: tgl_enable_dc3co(dev_priv); break; @@ -1156,10 +1157,10 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val &= DPOUNIT_CLOCK_GATE_DISABLE; val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); /* * Disable trickle feed and enable pnd deadline calculation @@ -1207,7 +1208,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * During driver initialization/resume we can avoid restoring the * part of the HW/SW state that will be inited anyway explicitly. */ - if (dev_priv->power_domains.initializing) + if (dev_priv->display.power.domains.initializing) return; intel_hpd_init(dev_priv); @@ -1302,7 +1303,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); struct i915_power_well *cmn_d = lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->chv_phy_control; + u32 phy_control = dev_priv->display.power.chv_phy_control; u32 phy_status = 0; u32 phy_status_mask = 0xffffffff; @@ -1313,7 +1314,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * reset (ie. the power well has been disabled at * least once). 
*/ - if (!dev_priv->chv_phy_assert[DPIO_PHY0]) + if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY0]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | @@ -1321,7 +1322,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - if (!dev_priv->chv_phy_assert[DPIO_PHY1]) + if (!dev_priv->display.power.chv_phy_assert[DPIO_PHY1]) phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); @@ -1397,7 +1398,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->chv_phy_control); + phy_status, dev_priv->display.power.chv_phy_control); } #undef BITS_SET @@ -1457,13 +1458,13 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_put(dev_priv); - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + dev_priv->display.power.chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + phy, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); } @@ -1487,18 +1488,18 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, PIPE_C); } - dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + dev_priv->display.power.chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); drm_dbg_kms(&dev_priv->drm, "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + phy, dev_priv->display.power.chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->chv_phy_assert[phy] = true; + dev_priv->display.power.chv_phy_assert[phy] = true; assert_chv_phy_status(dev_priv); } @@ -1516,7 +1517,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi * reset (ie. the power well has been disabled at * least once). 
*/ - if (!dev_priv->chv_phy_assert[phy]) + if (!dev_priv->display.power.chv_phy_assert[phy]) return; if (ch == DPIO_CH0) @@ -1570,27 +1571,27 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, enum dpio_channel ch, bool override) { - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; bool was_override; mutex_lock(&power_domains->lock); - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + was_override = dev_priv->display.power.chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); if (override == was_override) goto out; if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); + phy, ch, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1604,26 +1605,26 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + dev_priv->display.power.chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); drm_dbg_kms(&dev_priv->drm, "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); + phy, ch, mask, dev_priv->display.power.chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1701,7 +1702,7 @@ static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, - dev_priv->chv_phy_control); + dev_priv->display.power.chv_phy_control); } static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, @@ -1797,6 +1798,43 @@ tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, return intel_power_well_refcount(power_well); } +static void xelpdp_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = 
i915_power_well_instance(power_well)->xelpdp.aux_ch; + + intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST); + + /* + * The power status flag cannot be used to determine whether aux + * power wells have finished powering up. Instead we're + * expected to just wait a fixed 600us after raising the request + * bit. + */ + usleep_range(600, 1200); +} + +static void xelpdp_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; + + intel_de_rmw(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch), + XELPDP_DP_AUX_CH_CTL_POWER_REQUEST, + 0); + usleep_range(10, 30); +} + +static bool xelpdp_aux_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch; + + return intel_de_read(dev_priv, XELPDP_DP_AUX_CH_CTL(aux_ch)) & + XELPDP_DP_AUX_CH_CTL_POWER_STATUS; +} const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, @@ -1910,3 +1948,10 @@ const struct i915_power_well_ops tgl_tc_cold_off_ops = { .disable = tgl_tc_cold_off_power_well_disable, .is_enabled = tgl_tc_cold_off_power_well_is_enabled, }; + +const struct i915_power_well_ops xelpdp_aux_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = xelpdp_aux_power_well_enable, + .disable = xelpdp_aux_power_well_disable, + .is_enabled = xelpdp_aux_power_well_enabled, +}; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h index d0624642dcb6..e13b521e322a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.h +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h @@ -14,15 +14,15 @@ struct drm_i915_private; struct i915_power_well; #define for_each_power_well(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ - (__power_well) - (__dev_priv)->power_domains.power_wells < \ - (__dev_priv)->power_domains.power_well_count; \ + for ((__power_well) = (__dev_priv)->display.power.domains.power_wells; \ + (__power_well) - (__dev_priv)->display.power.domains.power_wells < \ + (__dev_priv)->display.power.domains.power_well_count; \ (__power_well)++) #define for_each_power_well_reverse(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ - (__dev_priv)->power_domains.power_well_count - 1; \ - (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + for ((__power_well) = (__dev_priv)->display.power.domains.power_wells + \ + (__dev_priv)->display.power.domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->display.power.domains.power_wells >= 0; \ (__power_well)--) /* @@ -80,6 +80,9 @@ struct i915_power_well_instance { */ u8 idx; } hsw; + struct { + u8 aux_ch; + } xelpdp; }; }; @@ -169,5 +172,6 @@ extern const struct i915_power_well_ops vlv_dpio_power_well_ops; extern const struct i915_power_well_ops icl_aux_power_well_ops; extern const struct i915_power_well_ops icl_ddi_power_well_ops; extern const struct i915_power_well_ops tgl_tc_cold_off_ops; +extern const struct i915_power_well_ops xelpdp_aux_power_well_ops; #endif diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 01977cd237eb..298d00a11f47 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1130,6 +1130,7 @@ struct intel_crtc_state { /* m2_n2 for eDP downclock */ struct intel_link_m_n dp_m2_n2; bool has_drrs; + bool seamless_m_n; /* PSR is supported but might not be enabled due to the lack of enabled planes */ bool has_psr; @@ -1712,7 +1713,7 @@ struct intel_dp { /* Display stream compression testing */ bool force_dsc_en; - int force_dsc_bpp; + int force_dsc_bpc; bool hobl_failed; bool hobl_active; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index fa9ef591b885..e52ecc0738a6 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -52,8 +52,8 @@ #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE -#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06) -#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06) +#define DG2_DMC_PATH DMC_PATH(dg2, 2, 07) +#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 07) MODULE_FIRMWARE(DG2_DMC_PATH); #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16) @@ -250,7 +250,7 @@ struct stepping_info { static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id) { - return i915->dmc.dmc_info[dmc_id].payload; + return i915->display.dmc.dmc_info[dmc_id].payload; } bool intel_dmc_has_payload(struct drm_i915_private *i915) @@ -277,6 +277,17 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) intel_de_posting_read(dev_priv, DC_STATE_DEBUG); } +static void disable_event_handler(struct drm_i915_private *i915, + i915_reg_t ctl_reg, i915_reg_t htp_reg) +{ + intel_de_write(i915, ctl_reg, + REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, + DMC_EVT_CTL_TYPE_EDGE_0_1) | + REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, + DMC_EVT_CTL_EVENT_ID_FALSE)); + intel_de_write(i915, htp_reg, 0); +} + static void disable_flip_queue_event(struct drm_i915_private *i915, i915_reg_t ctl_reg, i915_reg_t htp_reg) @@ -299,12 +310,7 @@ disable_flip_queue_event(struct drm_i915_private *i915, return; } - intel_de_write(i915, ctl_reg, - REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, - DMC_EVT_CTL_TYPE_EDGE_0_1) | - REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK, - DMC_EVT_CTL_EVENT_ID_FALSE)); - intel_de_write(i915, htp_reg, 0); + disable_event_handler(i915, ctl_reg, htp_reg); } static bool @@ -356,6 +362,51 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) } } +static void disable_all_event_handlers(struct drm_i915_private *i915) +{ + int id; + + /* TODO: disable the event handlers on pre-GEN12 platforms as well */ + if (DISPLAY_VER(i915) < 12) + return; + + for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) { + int handler; + + if (!has_dmc_id_fw(i915, id)) + continue; + + for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) + disable_event_handler(i915, + DMC_EVT_CTL(i915, id, handler), + DMC_EVT_HTP(i915, id, handler)); + } +} + +static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) +{ + enum pipe pipe; + + if (DISPLAY_VER(i915) != 13) + return; + + /* + * Wa_16015201720:adl-p,dg2 + * The WA requires clock gating to be disabled all the time + * for pipe A and B. + * For pipe C and D clock gating needs to be disabled only + * while initializing the firmware.
+ */ + if (enable) + for (pipe = PIPE_A; pipe <= PIPE_D; pipe++) + intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + 0, PIPEDMC_GATING_DIS); + else + for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) + intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), + PIPEDMC_GATING_DIS, 0); +} + /** * intel_dmc_load_program() - write the firmware from memory to register. * @dev_priv: i915 drm device. @@ -366,12 +417,16 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) */ void intel_dmc_load_program(struct drm_i915_private *dev_priv) { - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; u32 id, i; if (!intel_dmc_has_payload(dev_priv)) return; + pipedmc_clock_gating_wa(dev_priv, true); + + disable_all_event_handlers(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); preempt_disable(); @@ -393,7 +448,7 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) } } - dev_priv->dmc.dc_state = 0; + dev_priv->display.dmc.dc_state = 0; gen9_set_dc_state_debugmask(dev_priv); @@ -403,12 +458,31 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv) * here. */ disable_all_flip_queue_events(dev_priv); + + pipedmc_clock_gating_wa(dev_priv, false); +} + +/** + * intel_dmc_disable_program() - disable the firmware + * @i915: i915 drm device + * + * Disable all event handlers in the firmware, making sure the firmware is + * inactive after the display is uninitialized. + */ +void intel_dmc_disable_program(struct drm_i915_private *i915) +{ + if (!intel_dmc_has_payload(i915)) + return; + + pipedmc_clock_gating_wa(i915, true); + disable_all_event_handlers(i915); + pipedmc_clock_gating_wa(i915, false); } void assert_dmc_loaded(struct drm_i915_private *i915) { drm_WARN_ONCE(&i915->drm, - !intel_de_read(i915, DMC_PROGRAM(i915->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); @@ -445,7 +519,7 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, { unsigned int i, id; - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); for (i = 0; i < num_entries; i++) { id = package_ver <= 1 ? 
DMC_FW_MAIN : fw_info[i].dmc_id; @@ -473,7 +547,7 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, int header_ver, u8 dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); u32 start_range, end_range; int i; @@ -511,7 +585,7 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, size_t rem_size, u8 dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -622,7 +696,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -676,7 +750,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc); + struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); if (rem_size < sizeof(struct intel_css_header)) { drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); @@ -713,7 +787,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; struct stepping_info display_info = { '*', '*'}; const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info); u32 readcount = 0; @@ -740,7 +814,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, readcount += r; for (id = 0; id < DMC_FW_MAX; id++) { - if (!dev_priv->dmc.dmc_info[id].present) + if (!dev_priv->display.dmc.dmc_info[id].present) continue; offset = readcount + dmc->dmc_info[id].dmc_offset * 4; @@ -756,15 +830,15 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) { - drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); - dev_priv->dmc.wakeref = + drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); + dev_priv->display.dmc.wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); } static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&dev_priv->dmc.wakeref); + fetch_and_zero(&dev_priv->display.dmc.wakeref); intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); } @@ -775,10 +849,10 @@ static void dmc_load_work_fn(struct work_struct *work) struct intel_dmc *dmc; const struct firmware *fw = NULL; - dev_priv = container_of(work, typeof(*dev_priv), dmc.work); - dmc = &dev_priv->dmc; + dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work); + dmc = &dev_priv->display.dmc; - request_firmware(&fw, dev_priv->dmc.fw_path, dev_priv->drm.dev); + request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev); parse_dmc_fw(dev_priv, fw); if (intel_dmc_has_payload(dev_priv)) { @@ -787,7 
+861,7 @@ static void dmc_load_work_fn(struct work_struct *work) drm_info(&dev_priv->drm, "Finished loading DMC firmware %s (v%u.%u)\n", - dev_priv->dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), + dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } else { drm_notice(&dev_priv->drm, @@ -810,9 +884,9 @@ static void dmc_load_work_fn(struct work_struct *work) */ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) { - struct intel_dmc *dmc = &dev_priv->dmc; + struct intel_dmc *dmc = &dev_priv->display.dmc; - INIT_WORK(&dev_priv->dmc.work, dmc_load_work_fn); + INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn); if (!HAS_DMC(dev_priv)) return; @@ -895,7 +969,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) } drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path); - schedule_work(&dev_priv->dmc.work); + schedule_work(&dev_priv->display.dmc.work); } /** @@ -911,7 +985,7 @@ void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv) if (!HAS_DMC(dev_priv)) return; - flush_work(&dev_priv->dmc.work); + flush_work(&dev_priv->display.dmc.work); /* Drop the reference held in case DMC isn't loaded. */ if (!intel_dmc_has_payload(dev_priv)) @@ -953,16 +1027,16 @@ void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) return; intel_dmc_ucode_suspend(dev_priv); - drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); + drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); for (id = 0; id < DMC_FW_MAX; id++) - kfree(dev_priv->dmc.dmc_info[id].payload); + kfree(dev_priv->display.dmc.dmc_info[id].payload); } void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { - struct intel_dmc *dmc = &i915->dmc; + struct intel_dmc *dmc = &i915->display.dmc; if (!HAS_DMC(i915)) return; @@ -984,7 +1058,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) if (!HAS_DMC(i915)) return -ENODEV; - dmc = &i915->dmc; + dmc = &i915->display.dmc; wakeref = intel_runtime_pm_get(&i915->runtime_pm); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 41091aee3b47..67e03315ef99 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -47,6 +47,7 @@ struct intel_dmc { void intel_dmc_ucode_init(struct drm_i915_private *i915); void intel_dmc_load_program(struct drm_i915_private *i915); +void intel_dmc_disable_program(struct drm_i915_private *i915); void intel_dmc_ucode_fini(struct drm_i915_private *i915); void intel_dmc_ucode_suspend(struct drm_i915_private *i915); void intel_dmc_ucode_resume(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index 238620b55966..5e5e41644ddf 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -28,6 +28,8 @@ #define _DMC_REG(i915, dmc_id, reg) \ ((reg) - __DMC_REG_MMIO_BASE + _DMC_REG_MMIO_BASE(i915, dmc_id)) +#define DMC_EVENT_HANDLER_COUNT_GEN12 8 + #define _DMC_EVT_HTP_0 0x8f004 #define DMC_EVT_HTP(i915, dmc_id, handler) \ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a4e113253df3..c9be61d2348e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -286,11 +286,22 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 
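/*
 * A note on the mechanical change running through intel_dmc.c above:
 * dev_priv->dmc becomes dev_priv->display.dmc, and container_of() takes
 * a nested member path, so the back-references keep working. A minimal
 * standalone illustration; the example_* types are hypothetical:
 */
struct example_display { struct intel_dmc dmc; };
struct example_private { struct example_display display; };

static struct example_private *example_dmc_to_priv(struct intel_dmc *dmc)
{
	/* offsetof() resolves through nested members just the same */
	return container_of(dmc, struct example_private, display.dmc);
}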
} +static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) +{ + int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base); + int max_lanes = dig_port->max_lanes; + + if (vbt_max_lanes) + max_lanes = min(max_lanes, vbt_max_lanes); + + return max_lanes; +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - int source_max = dig_port->max_lanes; + int source_max = intel_dp_max_source_lane_count(dig_port); int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); @@ -389,23 +400,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } -static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) -{ - u32 voltage; - - voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; - - return voltage == VOLTAGE_INFO_0_85V; -} - static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_phy_is_combo(dev_priv, phy) && - (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) + if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -413,23 +414,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - - if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) - return 540000; - - return 810000; -} - -static int dg1_max_source_rate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); - - if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) + if (intel_dp_is_edp(intel_dp)) return 540000; return 810000; @@ -491,7 +476,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - max_rate = dg1_max_source_rate(intel_dp); + max_rate = 810000; else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else @@ -720,7 +705,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, if (bigjoiner) { u32 max_bpp_bigjoiner = - i915->max_cdclk_freq * 48 / + i915->display.cdclk.max_cdclk_freq * 48 / intel_dp_mode_to_fec_clock(mode_clock); bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); @@ -1312,21 +1297,45 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, } } +static bool has_seamless_m_n(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + + /* + * Seamless M/N reprogramming only implemented + * for BDW+ double buffered M/N registers so far. 
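/*
 * A sketch, not part of the patch: with the VBT clamp added above, the
 * common lane count in intel_dp_max_common_lane_count() is simply the
 * minimum of four independent limits (source hardware incl. VBT, sink
 * DPCD, Type-C FIA, LTTPR). Reduced to the bare computation:
 */
static int example_common_lane_count(int source, int sink, int fia, int lttpr)
{
	/* the link can only run as wide as its narrowest participant */
	return min(min(source, sink), min(fia, lttpr));
}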
+ */ + return HAS_DOUBLE_BUFFERED_M_N(i915) && + intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS; +} + +static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + + /* FIXME a bit of a mess wrt clock vs. crtc_clock */ + if (has_seamless_m_n(connector)) + return intel_panel_highest_mode(connector, adjusted_mode)->clock; + else + return adjusted_mode->crtc_clock; +} + /* Optimize link config in order: max bpp, min clock, min lanes */ static int intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state, const struct link_config_limits *limits) { - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - int bpp, i, lane_count; + int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state); int mode_rate, link_rate, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); - mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, - output_bpp); + mode_rate = intel_dp_link_required(clock, output_bpp); for (i = 0; i < intel_dp->num_common_rates; i++) { link_rate = intel_dp_common_rate(intel_dp, i); @@ -1377,7 +1386,18 @@ static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) return 0; } -#define DSC_SUPPORTED_VERSION_MIN 1 +static int intel_dp_source_dsc_version_minor(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return DISPLAY_VER(i915) >= 14 ? 2 : 1; +} + +static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) +{ + return (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MINOR_MASK) >> + DP_DSC_MINOR_SHIFT; +} static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) @@ -1395,6 +1415,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, * DP_DSC_RC_BUF_SIZE for this. */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; /* * Slice Height of 8 works for all currently available panels. 
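/*
 * A sketch of the fit test behind the "slow and wide" search above: bpp
 * walks from high to low and, per bpp, the lowest link rate and lane
 * count that still carry the mode win. Assuming 8b/10b coding, where a
 * lane moves one payload byte per link clock, the link rate in kHz
 * numerically equals kB/s per lane (simplified, not the driver's exact
 * helper):
 */
static bool example_mode_fits_link(int pixel_khz, int bpp,
				   int rate_khz, int lanes)
{
	long long need = DIV_ROUND_UP((long long)pixel_khz * bpp, 8);
	long long avail = (long long)rate_khz * lanes;

	return avail >= need;
}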
So start @@ -1416,9 +1437,8 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; vdsc_cfg->dsc_version_minor = - min(DSC_SUPPORTED_VERSION_MIN, - (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & - DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); + min(intel_dp_source_dsc_version_minor(intel_dp), + intel_dp_sink_dsc_version_minor(intel_dp)); vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & DP_DSC_RGB; @@ -1464,6 +1484,11 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); + if (intel_dp->force_dsc_bpc) { + pipe_bpp = intel_dp->force_dsc_bpc * 3; + drm_dbg_kms(&dev_priv->drm, "Input DSC BPP forced to %d", pipe_bpp); + } + /* Min Input BPC for ICL+ is 8 */ if (pipe_bpp < 8 * 3) { drm_dbg_kms(&dev_priv->drm, @@ -1515,28 +1540,12 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->dsc.slice_count = dsc_dp_slice_count; } - /* As of today we support DSC for only RGB */ - if (intel_dp->force_dsc_bpp) { - if (intel_dp->force_dsc_bpp >= 8 && - intel_dp->force_dsc_bpp < pipe_bpp) { - drm_dbg_kms(&dev_priv->drm, - "DSC BPP forced to %d", - intel_dp->force_dsc_bpp); - pipe_config->dsc.compressed_bpp = - intel_dp->force_dsc_bpp; - } else { - drm_dbg_kms(&dev_priv->drm, - "Invalid DSC BPP %d", - intel_dp->force_dsc_bpp); - } - } - /* * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate * is greater than the maximum Cdclock and if slice count is even * then we need to use 2 VDSC instances. */ - if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || + if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq || pipe_config->bigjoiner_pipes) { if (pipe_config->dsc.slice_count < 2) { drm_dbg_kms(&dev_priv->drm, @@ -1626,7 +1635,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, * Optimize for slow and wide for everything, because there are some * eDP 1.3 and 1.4 panels that don't work well with fast and narrow. 
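/*
 * A note on the debugfs force knob reworked above: it now takes bits
 * per component (bpc) rather than bits per pixel, and DSC input here is
 * RGB-only, i.e. three components per pixel:
 */
static int example_dsc_pipe_bpp(int bpc)
{
	return bpc * 3;		/* 8 bpc -> 24 bpp, 10 bpc -> 30 bpp */
}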
*/ - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits); if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) { drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n", @@ -1869,8 +1878,7 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915, enum transcoder cpu_transcoder) { - /* M1/N1 is double buffered */ - if (DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915)) + if (HAS_DOUBLE_BUFFERED_M_N(i915)) return true; return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); @@ -1908,13 +1916,16 @@ static bool can_enable_drrs(struct intel_connector *connector, static void intel_dp_drrs_compute_config(struct intel_connector *connector, struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) + int output_bpp) { struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *downclock_mode = intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode); int pixel_clock; + if (has_seamless_m_n(connector)) + pipe_config->seamless_m_n = true; + if (!can_enable_drrs(connector, pipe_config, downclock_mode)) { if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) intel_zero_m_n(&pipe_config->dp_m2_n2); @@ -1932,7 +1943,7 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); + pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2007,7 +2018,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); const struct drm_display_mode *fixed_mode; struct intel_connector *connector = intel_dp->attached_connector; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); int ret = 0, output_bpp; if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) @@ -2086,7 +2096,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, pipe_config->port_clock, &pipe_config->dp_m_n, - constant_n, pipe_config->fec_enable); + pipe_config->fec_enable); /* FIXME: abstract this better */ if (pipe_config->splitter.enable) @@ -2097,8 +2107,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_vrr_compute_config(pipe_config, conn_state); intel_psr_compute_config(intel_dp, pipe_config, conn_state); - intel_dp_drrs_compute_config(connector, pipe_config, - output_bpp, constant_n); + intel_dp_drrs_compute_config(connector, pipe_config, output_bpp); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -5032,9 +5041,9 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector) struct drm_i915_private *i915 = to_i915(connector->dev); spin_lock_irq(&i915->irq_lock); - i915->hotplug.event_bits |= BIT(encoder->hpd_pin); + i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin); spin_unlock_irq(&i915->irq_lock); - queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0); } static const struct drm_connector_funcs intel_dp_connector_funcs = { @@ -5192,7 +5201,7 @@ intel_edp_add_properties(struct intel_dp *intel_dp) return; 
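/*
 * A sketch of what the intel_link_compute_m_n() calls above program:
 * per the DP spec the hardware paces the link with ratios, where data
 * M/N is payload bandwidth over link bandwidth and link M/N is pixel
 * clock over link clock. Unreduced form (the driver scales these down;
 * example_data_m_n is hypothetical):
 */
static void example_data_m_n(int bpp, int pixel_khz, int lanes,
			     int link_khz, u64 *m, u64 *n)
{
	*m = (u64)bpp * pixel_khz;	/* payload bandwidth */
	*n = (u64)8 * lanes * link_khz;	/* link bandwidth */
}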
drm_connector_set_panel_orientation_with_quirk(&connector->base, - i915->vbt.orientation, + i915->display.vbt.orientation, fixed_mode->hdisplay, fixed_mode->vdisplay); } @@ -5302,8 +5311,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_init(intel_connector); - if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) - intel_connector->panel.backlight.power = intel_pps_backlight_power; intel_backlight_setup(intel_connector, pipe); intel_edp_add_properties(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 2bc119374555..48c375c65a41 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -42,7 +42,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) bool done; #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) - done = wait_event_timeout(i915->gmbus_wait_queue, C, + done = wait_event_timeout(i915->display.gmbus.wait_queue, C, msecs_to_jiffies_timeout(timeout_ms)); /* just trace the final value */ @@ -86,7 +86,7 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) - freq = dev_priv->cdclk.hw.cdclk; + freq = dev_priv->display.cdclk.hw.cdclk; else freq = RUNTIME_INFO(dev_priv)->rawclk_freq; return DIV_ROUND_CLOSEST(freq, 2000); @@ -150,6 +150,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, u32 unused) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u32 ret; /* @@ -170,6 +171,13 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, if (intel_tc_port_in_tbt_alt_mode(dig_port)) ret |= DP_AUX_CH_CTL_TBT_IO; + /* + * Power request bit is already set during aux power well enable. + * Preserve the bit across aux transactions. 
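/*
 * A worked example for the ilk AUX divider above: the AUX channel wants
 * a roughly 2 MHz bit clock, so the source frequency in kHz is divided
 * by 2000, e.g. a 540000 kHz cdclk gives
 * DIV_ROUND_CLOSEST(540000, 2000) == 270.
 */
static u32 example_aux_divider(int src_khz)
{
	return DIV_ROUND_CLOSEST(src_khz, 2000);
}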
+ */ + if (DISPLAY_VER(i915) >= 14) + ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST; + return ret; } @@ -629,6 +637,46 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) } } +static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + return XELPDP_DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return XELPDP_DP_AUX_CH_CTL(AUX_CH_A); + } +} + +static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_USBC1: + case AUX_CH_USBC2: + case AUX_CH_USBC3: + case AUX_CH_USBC4: + return XELPDP_DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return XELPDP_DP_AUX_CH_DATA(AUX_CH_A, index); + } +} + void intel_dp_aux_fini(struct intel_dp *intel_dp) { if (cpu_latency_qos_request_active(&intel_dp->pm_qos)) @@ -644,7 +692,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dig_port->base; enum aux_ch aux_ch = dig_port->aux_ch; - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(dev_priv) >= 14) { + intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; + intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; + } else if (DISPLAY_VER(dev_priv) >= 12) { intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; intel_dp->aux_ch_data_reg = tgl_aux_data_reg; } else if (DISPLAY_VER(dev_priv) >= 9) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index a7640dbcf00e..88689124c013 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -17,6 +17,7 @@ #include "intel_dp.h" #include "intel_dp_hdcp.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" static unsigned int transcoder_to_stream_enc_status(enum transcoder cpu_transcoder) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 9feaf1a589f3..3d3efcf02011 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -37,17 +37,6 @@ static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp) DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0; } -static const char *intel_dp_phy_name(enum drm_dp_phy dp_phy, - char *buf, size_t buf_size) -{ - if (dp_phy == DP_PHY_DPRX) - snprintf(buf, buf_size, "DPRX"); - else - snprintf(buf, buf_size, "LTTPR %d", dp_phy - DP_PHY_LTTPR1 + 1); - - return buf; -} - static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { @@ -60,20 +49,19 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); - char phy_name[10]; - - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "[ENCODER:%d:%s][%s] failed to read the PHY caps\n", - encoder->base.base.id, 
encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return; } drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n", - encoder->base.base.id, encoder->base.name, phy_name, + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy), (int)sizeof(intel_dp->lttpr_phy_caps[0]), phy_caps); } @@ -423,14 +411,13 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; int lane; if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " "TX FFE request: " TRAIN_REQ_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_REQ_TX_FFE_ARGS(link_status)); } else { @@ -438,7 +425,7 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, "vswing request: " TRAIN_REQ_FMT ", " "pre-emphasis request: " TRAIN_REQ_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_REQ_VSWING_ARGS(link_status), TRAIN_REQ_PREEMPH_ARGS(link_status)); @@ -503,13 +490,12 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); - char phy_name[10]; if (train_pat != DP_TRAINING_PATTERN_DISABLE) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), dp_training_pattern_name(train_pat)); intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); @@ -546,13 +532,12 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; if (intel_dp_is_uhbr(crtc_state)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " "TX FFE presets: " TRAIN_SET_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); } else { @@ -560,7 +545,7 @@ void intel_dp_set_signal_levels(struct intel_dp *intel_dp, "vswing levels: " TRAIN_SET_FMT ", " "pre-emphasis levels: " TRAIN_SET_FMT "\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), crtc_state->lane_count, TRAIN_SET_VSWING_ARGS(intel_dp->train_set), TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); @@ -671,6 +656,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, intel_dp_compute_rate(intel_dp, crtc_state->port_clock, &link_bw, &rate_select); + /* + * WaEdpLinkRateDataReload + * + * Parade PS8461E MUX (used on various TGL+ laptops) needs + * to snoop the link rates reported by the sink when we + * use LINK_RATE_SET in order to operate in jitter cleaning + * mode (as opposed to redriver mode). Unfortunately it + * loses track of the snooped link rates when powered down, + * so we need to make it re-snoop often. Without this, high + * link rates are not stable. 
+ */ + if (!link_bw) { + struct intel_connector *connector = intel_dp->attached_connector; + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n", + connector->base.base.id, connector->base.name); + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + } + if (link_bw) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n", @@ -732,12 +739,11 @@ intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); - char phy_name[10]; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), link_status[0], link_status[1], link_status[2], link_status[3], link_status[4], link_status[5]); } @@ -757,21 +763,19 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, int voltage_tries, cr_tries, max_cr_tries; u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; - char phy_name[10]; int delay_us; delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); - /* clock recovery */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -795,14 +799,16 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Clock recovery OK\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return true; } @@ -810,7 +816,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -818,7 +825,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); return false; } @@ -828,7 +836,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + 
drm_dp_phy_name(dp_phy)); return false; } @@ -846,7 +855,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n", - encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy), max_cr_tries); return false; } @@ -924,15 +934,12 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; - char phy_name[10]; int delay_us; delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd, dp_phy, intel_dp_is_uhbr(crtc_state)); - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); - training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ if (training_pattern != DP_TRAINING_PATTERN_4) @@ -944,7 +951,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to start channel equalization\n", encoder->base.base.id, encoder->base.name, - phy_name); + drm_dp_phy_name(dp_phy)); return false; } @@ -955,7 +962,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, link_status) < 0) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -966,7 +974,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot " "continue channel equalization\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -975,7 +984,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, channel_eq = true; drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Channel EQ done. 
DP Training successful\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } @@ -985,7 +995,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to update link training\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); break; } } @@ -995,7 +1006,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n", - encoder->base.base.id, encoder->base.name, phy_name); + encoder->base.base.id, encoder->base.name, + drm_dp_phy_name(dp_phy)); } return channel_eq; @@ -1070,7 +1082,6 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, { struct intel_connector *connector = intel_dp->attached_connector; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - char phy_name[10]; bool ret = false; if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy)) @@ -1086,7 +1097,7 @@ out: "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n", connector->base.base.id, connector->base.name, encoder->base.base.id, encoder->base.name, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + drm_dp_phy_name(dp_phy), ret ? "passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 7713c19042f3..03604a37931c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -58,7 +58,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct drm_i915_private *i915 = to_i915(connector->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr); @@ -100,7 +99,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, crtc_state->port_clock, &crtc_state->dp_m_n, - constant_n, crtc_state->fec_enable); + crtc_state->fec_enable); crtc_state->dp_m_n.tu = slots; return 0; @@ -566,7 +565,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, drm_atomic_get_mst_payload_state(mst_state, connector->port)); - if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) + if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable) + intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0, + FECSTALL_DIS_DPTSTREAM_DPTTG); + else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, FECSTALL_DIS_DPTSTREAM_DPTTG); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index cc6abe761f5e..8732b8722ed7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -484,7 +484,7 @@ void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) enum dpio_phy rcomp_phy = phy_info->rcomp_phy; bool was_enabled; - 
lockdep_assert_held(&dev_priv->power_domains.lock); + lockdep_assert_held(&dev_priv->display.power.domains.lock); was_enabled = true; if (rcomp_phy != -1) diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 5262f16b45ac..b15ba78d64d6 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -938,12 +938,25 @@ static int hsw_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); + int ret; if (DISPLAY_VER(dev_priv) < 11 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; - return intel_compute_shared_dplls(state, crtc, encoder); + ret = intel_compute_shared_dplls(state, crtc, encoder); + if (ret) + return ret; + + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + /* CRT dotclock is determined via other means */ + if (!crtc_state->has_pch_encoder) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return 0; } static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state, @@ -969,8 +982,15 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); + int ret; + + ret = intel_mpllb_calc_state(crtc_state, encoder); + if (ret) + return ret; + + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); - return intel_mpllb_calc_state(crtc_state, encoder); + return 0; } static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) @@ -991,7 +1011,7 @@ static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state, factor = 21; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && - dev_priv->vbt.lvds_ssc_freq == 100000) || + dev_priv->display.vbt.lvds_ssc_freq == 100000) || (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev_priv))) factor = 25; @@ -1096,6 +1116,7 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_limit *limit; int refclk = 120000; + int ret; /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
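/*
 * The recurring pattern added to the compute-clock hooks in this file:
 * once a divider solution is chosen, the pipe clock is re-derived from
 * it so later fastset/state checks compare against what the PLL will
 * really produce. Condensed from the i9xx-style hooks in this hunk
 * (the wrapper name is hypothetical):
 */
static void example_clock_readback(struct intel_crtc_state *crtc_state)
{
	crtc_state->port_clock = crtc_state->dpll.dot;
	crtc_state->hw.adjusted_mode.crtc_clock =
		intel_crtc_dotclock(crtc_state);
}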
*/ if (!crtc_state->has_pch_encoder) @@ -1105,8 +1126,8 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, if (intel_panel_use_ssc(dev_priv)) { drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", - dev_priv->vbt.lvds_ssc_freq); - refclk = dev_priv->vbt.lvds_ssc_freq; + dev_priv->display.vbt.lvds_ssc_freq); + refclk = dev_priv->display.vbt.lvds_ssc_freq; } if (intel_is_dual_link_lvds(dev_priv)) { @@ -1132,7 +1153,14 @@ static int ilk_crtc_compute_clock(struct intel_atomic_state *state, ilk_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); - return intel_compute_shared_dplls(state, crtc, NULL); + ret = intel_compute_shared_dplls(state, crtc, NULL); + if (ret) + return ret; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + + return ret; } static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state, @@ -1198,6 +1226,13 @@ static int chv_crtc_compute_clock(struct intel_atomic_state *state, chv_compute_dpll(crtc_state); + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1217,6 +1252,13 @@ static int vlv_crtc_compute_clock(struct intel_atomic_state *state, vlv_compute_dpll(crtc_state); + /* FIXME this is a mess */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1231,7 +1273,7 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1259,6 +1301,11 @@ static int g4x_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + /* FIXME this is a mess */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1273,7 +1320,7 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1292,6 +1339,9 @@ static int pnv_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1306,7 +1356,7 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1325,6 +1375,11 @@ static int i9xx_crtc_compute_clock(struct intel_atomic_state *state, i9xx_compute_dpll(crtc_state, &crtc_state->dpll, 
&crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + /* FIXME this is a mess */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_TVOUT)) + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1339,7 +1394,7 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - refclk = dev_priv->vbt.lvds_ssc_freq; + refclk = dev_priv->display.vbt.lvds_ssc_freq; drm_dbg_kms(&dev_priv->drm, "using SSC reference clock of %d kHz\n", refclk); @@ -1360,6 +1415,9 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state, i8xx_compute_dpll(crtc_state, &crtc_state->dpll, &crtc_state->dpll); + crtc_state->port_clock = crtc_state->dpll.dot; + crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); + return 0; } @@ -1411,16 +1469,13 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state, drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); - if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) - return 0; - memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if (!crtc_state->hw.enable) return 0; - ret = i915->dpll_funcs->crtc_compute_clock(state, crtc); + ret = i915->display.funcs.dpll->crtc_compute_clock(state, crtc); if (ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't calculate DPLL settings\n", crtc->base.base.id, crtc->base.name); @@ -1439,17 +1494,15 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, int ret; drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state)); + drm_WARN_ON(&i915->drm, !crtc_state->hw.enable && crtc_state->shared_dpll); - if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll)) - return 0; - - if (!crtc_state->hw.enable) + if (!crtc_state->hw.enable || crtc_state->shared_dpll) return 0; - if (!i915->dpll_funcs->crtc_get_shared_dpll) + if (!i915->display.funcs.dpll->crtc_get_shared_dpll) return 0; - ret = i915->dpll_funcs->crtc_get_shared_dpll(state, crtc); + ret = i915->display.funcs.dpll->crtc_get_shared_dpll(state, crtc); if (ret) { drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] Couldn't get a shared DPLL\n", crtc->base.base.id, crtc->base.name); @@ -1463,23 +1516,23 @@ void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { if (IS_DG2(dev_priv)) - dev_priv->dpll_funcs = &dg2_dpll_funcs; + dev_priv->display.funcs.dpll = &dg2_dpll_funcs; else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) - dev_priv->dpll_funcs = &hsw_dpll_funcs; + dev_priv->display.funcs.dpll = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) - dev_priv->dpll_funcs = &ilk_dpll_funcs; + dev_priv->display.funcs.dpll = &ilk_dpll_funcs; else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->dpll_funcs = &chv_dpll_funcs; + dev_priv->display.funcs.dpll = &chv_dpll_funcs; else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->dpll_funcs = &vlv_dpll_funcs; + dev_priv->display.funcs.dpll = &vlv_dpll_funcs; else if (IS_G4X(dev_priv)) - dev_priv->dpll_funcs = &g4x_dpll_funcs; + dev_priv->display.funcs.dpll = &g4x_dpll_funcs; else if (IS_PINEVIEW(dev_priv)) - dev_priv->dpll_funcs = &pnv_dpll_funcs; + dev_priv->display.funcs.dpll = &pnv_dpll_funcs; else if (DISPLAY_VER(dev_priv) != 2) - dev_priv->dpll_funcs = &i9xx_dpll_funcs; + dev_priv->display.funcs.dpll = &i9xx_dpll_funcs; else - dev_priv->dpll_funcs = &i8xx_dpll_funcs; + dev_priv->display.funcs.dpll = &i8xx_dpll_funcs; } static bool i9xx_has_pps(struct drm_i915_private *dev_priv) diff --git 
a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 118598c9a809..e5fb66a5dd02 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -113,8 +113,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, enum intel_dpll_id i; /* Copy shared dpll state */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->display.dpll.shared_dplls[i]; shared_dpll[i] = pll->state; } @@ -149,7 +149,7 @@ struct intel_shared_dpll * intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { - return &dev_priv->dpll.shared_dplls[id]; + return &dev_priv->display.dpll.shared_dplls[id]; } /** @@ -164,11 +164,11 @@ enum intel_dpll_id intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - long pll_idx = pll - dev_priv->dpll.shared_dplls; + long pll_idx = pll - dev_priv->display.dpll.shared_dplls; if (drm_WARN_ON(&dev_priv->drm, pll_idx < 0 || - pll_idx >= dev_priv->dpll.num_shared_dpll)) + pll_idx >= dev_priv->display.dpll.num_shared_dpll)) return -1; return pll_idx; @@ -245,7 +245,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); old_mask = pll->active_mask; if (drm_WARN_ON(&dev_priv->drm, !(pll->state.pipe_mask & pipe_mask)) || @@ -271,7 +271,7 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = true; out: - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } /** @@ -294,7 +294,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) if (pll == NULL) return; - mutex_lock(&dev_priv->dpll.lock); + mutex_lock(&dev_priv->display.dpll.lock); if (drm_WARN(&dev_priv->drm, !(pll->active_mask & pipe_mask), "%s not used by [CRTC:%d:%s]\n", pll->info->name, crtc->base.base.id, crtc->base.name)) @@ -317,7 +317,7 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) pll->on = false; out: - mutex_unlock(&dev_priv->dpll.lock); + mutex_unlock(&dev_priv->display.dpll.lock); } static struct intel_shared_dpll * @@ -336,7 +336,7 @@ intel_find_shared_dpll(struct intel_atomic_state *state, drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { - pll = &dev_priv->dpll.shared_dplls[i]; + pll = &dev_priv->display.dpll.shared_dplls[i]; /* Only want to check enabled timings first */ if (shared_dpll[i].pipe_mask == 0) { @@ -436,9 +436,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) if (!state->dpll_set) return; - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = - &dev_priv->dpll.shared_dplls[i]; + &dev_priv->display.dpll.shared_dplls[i]; swap(pll->state, shared_dpll[i]); } @@ -537,7 +537,7 @@ static int ibx_get_dpll(struct intel_atomic_state *state, if (HAS_PCH_IBX(dev_priv)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
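/*
 * A note on intel_get_shared_dpll_id() above: the index is recovered by
 * pointer arithmetic against the (now display.dpll) array; subtracting
 * the array base from an element pointer yields the index. Minimal
 * illustration:
 */
static long example_dpll_index(const struct intel_shared_dpll *pll,
			       const struct intel_shared_dpll *base)
{
	return pll - base;	/* valid only for pointers into that array */
}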
*/ i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->dpll.shared_dplls[i]; + pll = &dev_priv->display.dpll.shared_dplls[i]; drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", @@ -905,37 +905,6 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static int -hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - unsigned int p, n2, r2; - - hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); - - crtc_state->dpll_hw_state.wrpll = - WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | - WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | - WRPLL_DIVIDER_POST(p); - - return 0; -} - -static struct intel_shared_dpll * -hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - return intel_find_shared_dpll(state, crtc, - &crtc_state->dpll_hw_state, - BIT(DPLL_ID_WRPLL2) | - BIT(DPLL_ID_WRPLL1)); -} - static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) @@ -948,7 +917,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, case WRPLL_REF_SPECIAL_HSW: /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { - refclk = dev_priv->dpll.ref_clks.nssc; + refclk = dev_priv->display.dpll.ref_clks.nssc; break; } fallthrough; @@ -958,7 +927,7 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, * code only cares about 5% accuracy, and spread is a max of * 0.5% downspread. */ - refclk = dev_priv->dpll.ref_clks.ssc; + refclk = dev_priv->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -977,6 +946,41 @@ static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, } static int +hsw_ddi_wrpll_compute_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + unsigned int p, n2, r2; + + hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); + + crtc_state->dpll_hw_state.wrpll = + WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | + WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p); + + crtc_state->port_clock = hsw_ddi_wrpll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); + + return 0; +} + +static struct intel_shared_dpll * +hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + return intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, + BIT(DPLL_ID_WRPLL2) | + BIT(DPLL_ID_WRPLL1)); +} + +static int hsw_ddi_lcpll_compute_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -1145,12 +1149,12 @@ static int hsw_get_dpll(struct intel_atomic_state *state, static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) { - i915->dpll.ref_clks.ssc = 135000; + i915->display.dpll.ref_clks.ssc = 135000; /* Non-SSC is only used on non-ULT HSW. 
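/*
 * A sketch of the reference-clock selection in
 * hsw_update_dpll_ref_clks() above, reduced to the strap test:
 */
static int example_hsw_nssc_ref_khz(u32 fuse_strap3)
{
	return (fuse_strap3 & HSW_REF_CLK_SELECT) ? 24000 : 135000;
}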
*/ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT) - i915->dpll.ref_clks.nssc = 24000; + i915->display.dpll.ref_clks.nssc = 24000; else - i915->dpll.ref_clks.nssc = 135000; + i915->display.dpll.ref_clks.nssc = 135000; } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1618,48 +1622,11 @@ skip_remaining_dividers: return 0; } -static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - struct skl_wrpll_params wrpll_params = {}; - u32 ctrl1, cfgcr1, cfgcr2; - int ret; - - /* - * See comment in intel_dpll_hw_state to understand why we always use 0 - * as the DPLL id in this function. - */ - ctrl1 = DPLL_CTRL1_OVERRIDE(0); - - ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - - ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, - i915->dpll.ref_clks.nssc, &wrpll_params); - if (ret) - return ret; - - cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | - DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | - wrpll_params.dco_integer; - - cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | - DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | - DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | - DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | - wrpll_params.central_freq; - - crtc_state->dpll_hw_state.ctrl1 = ctrl1; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - - return 0; -} - static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { - int ref_clock = i915->dpll.ref_clks.nssc; + int ref_clock = i915->display.dpll.ref_clks.nssc; u32 p0, p1, p2, dco_freq; p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; @@ -1726,6 +1693,46 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, return dco_freq / (p0 * p1 * p2 * 5); } +static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + struct skl_wrpll_params wrpll_params = {}; + u32 ctrl1, cfgcr1, cfgcr2; + int ret; + + /* + * See comment in intel_dpll_hw_state to understand why we always use 0 + * as the DPLL id in this function. 
+ */ + ctrl1 = DPLL_CTRL1_OVERRIDE(0); + + ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); + + ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->display.dpll.ref_clks.nssc, &wrpll_params); + if (ret) + return ret; + + cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | + DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | + wrpll_params.dco_integer; + + cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | + DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | + wrpll_params.central_freq; + + crtc_state->dpll_hw_state.ctrl1 = ctrl1; + crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; + crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; + + crtc_state->port_clock = skl_ddi_wrpll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); + + return 0; +} + static int skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -1858,7 +1865,7 @@ static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ - i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; + i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -2171,7 +2178,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, } } - chv_calc_dpll_params(i915->dpll.ref_clks.nssc, clk_div); + chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, clk_div); drm_WARN_ON(&i915->drm, clk_div->vco == 0 || clk_div->dot != crtc_state->port_clock); @@ -2245,6 +2252,23 @@ static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, return 0; } +static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) +{ + struct dpll clock; + + clock.m1 = 2; + clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); + clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); + clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); + clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); + + return chv_calc_dpll_params(i915->display.dpll.ref_clks.nssc, &clock); +} + static int bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -2258,28 +2282,20 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) static int bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct dpll clk_div = {}; + int ret; bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); -} - -static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state) -{ - struct dpll clock; + ret = bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); + if (ret) + return ret; - clock.m1 = 2; - clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, pll_state->pll0) << 22; - if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, pll_state->pll2); - clock.n = REG_FIELD_GET(PORT_PLL_N_MASK, pll_state->pll1); - clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, pll_state->ebb0); - clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, pll_state->ebb0); + crtc_state->port_clock = bxt_ddi_pll_get_freq(i915, NULL, + &crtc_state->dpll_hw_state); - return 
chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); + return 0; } static int bxt_compute_dpll(struct intel_atomic_state *state, @@ -2324,8 +2340,8 @@ static int bxt_get_dpll(struct intel_atomic_state *state, static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) { - i915->dpll.ref_clks.ssc = 100000; - i915->dpll.ref_clks.nssc = 100000; + i915->display.dpll.ref_clks.ssc = 100000; + i915->display.dpll.ref_clks.nssc = 100000; /* DSI non-SSC ref 19.2MHz */ } @@ -2468,7 +2484,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && - i915->dpll.ref_clks.nssc == 38400; + i915->display.dpll.ref_clks.nssc == 38400; } struct icl_combo_pll_params { @@ -2562,7 +2578,7 @@ static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = - dev_priv->dpll.ref_clks.nssc == 24000 ? + dev_priv->display.dpll.ref_clks.nssc == 24000 ? icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; @@ -2585,9 +2601,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (DISPLAY_VER(dev_priv) >= 12) { - switch (dev_priv->dpll.ref_clks.nssc) { + switch (dev_priv->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->dpll.ref_clks.nssc); + MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2598,9 +2614,9 @@ static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, break; } } else { - switch (dev_priv->dpll.ref_clks.nssc) { + switch (dev_priv->display.dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->dpll.ref_clks.nssc); + MISSING_CASE(dev_priv->display.dpll.ref_clks.nssc); fallthrough; case 19200: case 38400: @@ -2630,7 +2646,7 @@ static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, static int icl_wrpll_ref_clock(struct drm_i915_private *i915) { - int ref_clock = i915->dpll.ref_clks.nssc; + int ref_clock = i915->display.dpll.ref_clks.nssc; /* * For ICL+, the spec states: if reference frequency is 38.4, @@ -2769,8 +2785,8 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915, else pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; - if (i915->vbt.override_afc_startup) - pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val); + if (i915->display.vbt.override_afc_startup) + pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->display.vbt.override_afc_startup_val); } static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, @@ -2857,7 +2873,7 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int refclk_khz = dev_priv->dpll.ref_clks.nssc; + int refclk_khz = dev_priv->display.dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; @@ -2965,8 +2981,8 @@ static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | DKL_PLL_DIV0_FBPREDIV(m1div) | DKL_PLL_DIV0_FBDIV_INT(m2div_int); - if (dev_priv->vbt.override_afc_startup) { - u8 val = 
dev_priv->vbt.override_afc_startup_val; + if (dev_priv->display.vbt.override_afc_startup) { + u8 val = dev_priv->display.vbt.override_afc_startup_val; pll_state->mg_pll_div0 |= DKL_PLL_DIV0_AFC_STARTUP(val); } @@ -3063,7 +3079,7 @@ static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; - ref_clock = dev_priv->dpll.ref_clks.nssc; + ref_clock = dev_priv->display.dpll.ref_clks.nssc; if (DISPLAY_VER(dev_priv) >= 12) { m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; @@ -3197,6 +3213,12 @@ static int icl_compute_combo_phy_dpll(struct intel_atomic_state *state, icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state); + /* this is mainly for the fastset check */ + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT); + + crtc_state->port_clock = icl_ddi_combo_pll_get_freq(dev_priv, NULL, + &port_dpll->hw_state); + return 0; } @@ -3282,6 +3304,12 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state, if (ret) return ret; + /* this is mainly for the fastset check */ + icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY); + + crtc_state->port_clock = icl_ddi_mg_pll_get_freq(dev_priv, NULL, + &port_dpll->hw_state); + return 0; } @@ -3440,7 +3468,7 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - if (dev_priv->dpll.ref_clks.nssc == 38400) { + if (dev_priv->display.dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { @@ -3502,7 +3530,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); val = DKL_PLL_DIV0_MASK; - if (dev_priv->vbt.override_afc_startup) + if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; @@ -3566,7 +3594,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = intel_de_read(dev_priv, TGL_DPLL_CFGCR1(id)); - if (dev_priv->vbt.override_afc_startup) { + if (dev_priv->display.vbt.override_afc_startup) { hw_state->div0 = intel_de_read(dev_priv, TGL_DPLL0_DIV0(id)); hw_state->div0 &= TGL_DPLL0_DIV0_AFC_STARTUP_MASK; } @@ -3638,9 +3666,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); - drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->vbt.override_afc_startup && + drm_WARN_ON_ONCE(&dev_priv->drm, dev_priv->display.vbt.override_afc_startup && !i915_mmio_reg_valid(div0_reg)); - if (dev_priv->vbt.override_afc_startup && + if (dev_priv->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); @@ -3732,7 +3760,7 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); val = DKL_PLL_DIV0_MASK; - if (dev_priv->vbt.override_afc_startup) + if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, hw_state->mg_pll_div0); @@ -3967,7 +3995,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv, static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) { /* No SSC ref */ - i915->dpll.ref_clks.nssc = 
i915->cdclk.hw.ref; + i915->display.dpll.ref_clks.nssc = i915->display.cdclk.hw.ref; } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -4192,22 +4220,24 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv) dpll_mgr = &pch_pll_mgr; if (!dpll_mgr) { - dev_priv->dpll.num_shared_dpll = 0; + dev_priv->display.dpll.num_shared_dpll = 0; return; } dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { + if (drm_WARN_ON(&dev_priv->drm, + i >= ARRAY_SIZE(dev_priv->display.dpll.shared_dplls))) + break; + drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id); - dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; + dev_priv->display.dpll.shared_dplls[i].info = &dpll_info[i]; } - dev_priv->dpll.mgr = dpll_mgr; - dev_priv->dpll.num_shared_dpll = i; - mutex_init(&dev_priv->dpll.lock); - - BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS); + dev_priv->display.dpll.mgr = dpll_mgr; + dev_priv->display.dpll.num_shared_dpll = i; + mutex_init(&dev_priv->display.dpll.lock); } /** @@ -4229,7 +4259,7 @@ int intel_compute_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; @@ -4262,7 +4292,7 @@ int intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return -EINVAL; @@ -4285,7 +4315,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; /* * FIXME: this function is called for every platform having a @@ -4314,7 +4344,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->display.dpll.mgr; if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return; @@ -4385,16 +4415,16 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, void intel_dpll_update_ref_clks(struct drm_i915_private *i915) { - if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks) - i915->dpll.mgr->update_ref_clks(i915); + if (i915->display.dpll.mgr && i915->display.dpll.mgr->update_ref_clks) + i915->display.dpll.mgr->update_ref_clks(i915); } void intel_dpll_readout_hw_state(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]); + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + readout_dpll_hw_state(i915, &i915->display.dpll.shared_dplls[i]); } static void sanitize_dpll_state(struct drm_i915_private *i915, @@ -4420,8 +4450,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]); + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + sanitize_dpll_state(i915, 
&i915->display.dpll.shared_dplls[i]); } /** @@ -4434,8 +4464,8 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915) void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - if (dev_priv->dpll.mgr) { - dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state); + if (dev_priv->display.dpll.mgr) { + dev_priv->display.dpll.mgr->dump_hw_state(dev_priv, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure @@ -4533,7 +4563,7 @@ void intel_shared_dpll_verify_disabled(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->dpll.num_shared_dpll; i++) - verify_single_dpll_state(i915, &i915->dpll.shared_dplls[i], + for (i = 0; i < i915->display.dpll.num_shared_dpll; i++) + verify_single_dpll_state(i915, &i915->display.dpll.shared_dplls[i], NULL, NULL); } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index c4affcb216fd..fc9c3e41c333 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -9,6 +9,36 @@ #include "i915_drv.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dsb.h" + +struct i915_vma; + +enum dsb_id { + INVALID_DSB = -1, + DSB1, + DSB2, + DSB3, + MAX_DSB_PER_PIPE +}; + +struct intel_dsb { + enum dsb_id id; + u32 *cmd_buf; + struct i915_vma *vma; + + /* + * free_pos will point to the first free entry position + * and help in calculating the tail of the command buffer. + */ + int free_pos; + + /* + * ins_start_offset will help to store the start address of the dsb + * instruction and help in identifying the batch of auto-increment + * registers. + */ + u32 ins_start_offset; +}; #define DSB_BUF_SIZE (2 * PAGE_SIZE) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 6cb9c580cdca..74dd2b3343bb 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -11,34 +11,6 @@ #include "i915_reg_defs.h" struct intel_crtc_state; -struct i915_vma; - -enum dsb_id { - INVALID_DSB = -1, - DSB1, - DSB2, - DSB3, - MAX_DSB_PER_PIPE -}; - -struct intel_dsb { - enum dsb_id id; - u32 *cmd_buf; - struct i915_vma *vma; - - /* - * free_pos will point the first free entry position - * and help in calculating tail of command buffer. - */ - int free_pos; - - /* - * ins_start_offset will help to store start address of the dsb - * instuction and help in identifying the batch of auto-increment - * register.
- */ - u32 ins_start_offset; -}; void intel_dsb_prepare(struct intel_crtc_state *crtc_state); void intel_dsb_cleanup(struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 35e121cd226c..5efdd471ac2b 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -106,7 +106,7 @@ intel_dsi_get_panel_orientation(struct intel_connector *connector) if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; - orientation = dev_priv->vbt.orientation; + orientation = dev_priv->display.vbt.orientation; if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) return orientation; diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index eafef0a87fea..ce80bd8be519 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -89,9 +89,6 @@ struct intel_dsi { u8 escape_clk_div; u8 dual_link; - u16 dcs_backlight_ports; - u16 dcs_cabc_ports; - /* RGB or BGR */ bool bgr_enabled; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 1bc7118c56a2..20e466d843ce 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -53,7 +53,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused enum port port; size_t len = panel->backlight.max > U8_MAX ? 2 : 1; - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &data, len); @@ -80,7 +80,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 data[1] = level; } - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mode_flags = dsi_device->mode_flags; dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; @@ -93,12 +93,13 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; dcs_set_backlight(conn_state, 0); - for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_OFF; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -106,7 +107,7 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state, &cabc, sizeof(cabc)); } - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -127,10 +128,11 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; - for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + 
for_each_dsi_port(port, panel->vbt.dsi.bl_ports) { u8 ctrl = 0; dsi_device = intel_dsi->dsi_hosts[port]->device; @@ -146,7 +148,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, &ctrl, sizeof(ctrl)); } - for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + for_each_dsi_port(port, panel->vbt.dsi.cabc_ports) { u8 cabc = POWER_SAVE_MEDIUM; dsi_device = intel_dsi->dsi_hosts[port]->device; diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h index d96c3cc46e50..50205f064d93 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h +++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h @@ -75,8 +75,8 @@ struct intel_dvo_dev_ops { * * \return MODE_OK if the mode is valid, or another MODE_* otherwise. */ - int (*mode_valid)(struct intel_dvo_device *dvo, - struct drm_display_mode *mode); + enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo, + struct drm_display_mode *mode); /* * Callback for preparing mode changes on an output diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 16537830ccf0..f38175304928 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -55,11 +55,11 @@ #define for_each_fbc_id(__dev_priv, __fbc_id) \ for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ - for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id)) + for_each_if(RUNTIME_INFO(__dev_priv)->fbc_mask & BIT(__fbc_id)) #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ for_each_fbc_id((__dev_priv), (__fbc_id)) \ - for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)]) + for_each_if((__fbc) = (__dev_priv)->display.fbc[(__fbc_id)]) struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); @@ -1098,6 +1098,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } + /* Wa_14016291713 */ + if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) { + plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)"; + return 0; + } + if (!pixel_format_is_valid(plane_state)) { plane_state->no_fbc_reason = "pixel format not supported"; return 0; @@ -1704,17 +1710,17 @@ void intel_fbc_init(struct drm_i915_private *i915) enum intel_fbc_id fbc_id; if (!drm_mm_initialized(&i915->mm.stolen)) - mkwrite_device_info(i915)->display.fbc_mask = 0; + RUNTIME_INFO(i915)->fbc_mask = 0; if (need_fbc_vtd_wa(i915)) - mkwrite_device_info(i915)->display.fbc_mask = 0; + RUNTIME_INFO(i915)->fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); for_each_fbc_id(i915, fbc_id) - i915->fbc[fbc_id] = intel_fbc_create(i915, fbc_id); + i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id); } /** @@ -1834,7 +1840,7 @@ void intel_fbc_debugfs_register(struct drm_i915_private *i915) struct drm_minor *minor = i915->drm.primary; struct intel_fbc *fbc; - fbc = i915->fbc[INTEL_FBC_A]; + fbc = i915->display.fbc[INTEL_FBC_A]; if (fbc) intel_fbc_debugfs_add(fbc, minor->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index db60143295ec..4adb98afe6ff 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -19,6 +19,7 @@ struct intel_plane_state; enum intel_fbc_id { INTEL_FBC_A, + INTEL_FBC_B, I915_MAX_FBCS, }; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c 
b/drivers/gpu/drm/i915/display/intel_fbdev.c index 1922f62d04c0..112aa0447a0d 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -210,6 +210,12 @@ static int intelfb_create(struct drm_fb_helper *helper, struct drm_i915_gem_object *obj; int ret; + mutex_lock(&ifbdev->hpd_lock); + ret = ifbdev->hpd_suspended ? -EAGAIN : 0; + mutex_unlock(&ifbdev->hpd_lock); + if (ret) + return ret; + if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) { @@ -500,7 +506,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work) { intel_fbdev_set_suspend(&container_of(work, struct drm_i915_private, - fbdev_suspend_work)->drm, + display.fbdev.suspend_work)->drm, FBINFO_STATE_RUNNING, true); } @@ -530,8 +536,8 @@ int intel_fbdev_init(struct drm_device *dev) return ret; } - dev_priv->fbdev = ifbdev; - INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); + dev_priv->display.fbdev.fbdev = ifbdev; + INIT_WORK(&dev_priv->display.fbdev.suspend_work, intel_fbdev_suspend_worker); return 0; } @@ -548,7 +554,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) void intel_fbdev_initial_config_async(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; if (!ifbdev) return; @@ -568,12 +574,13 @@ static void intel_fbdev_sync(struct intel_fbdev *ifbdev) void intel_fbdev_unregister(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; - cancel_work_sync(&dev_priv->fbdev_suspend_work); + intel_fbdev_set_suspend(&dev_priv->drm, FBINFO_STATE_SUSPENDED, true); + if (!current_is_async()) intel_fbdev_sync(ifbdev); @@ -582,7 +589,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv) void intel_fbdev_fini(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev); + struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->display.fbdev.fbdev); if (!ifbdev) return; @@ -596,7 +603,7 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) */ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state) { - struct intel_fbdev *ifbdev = i915->fbdev; + struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev; bool send_hpd = false; mutex_lock(&ifbdev->hpd_lock); @@ -614,11 +621,11 @@ static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; struct fb_info *info; if (!ifbdev || !ifbdev->vma) - return; + goto set_suspend; info = ifbdev->helper.fbdev; @@ -631,7 +638,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * ourselves, so only flush outstanding work upon suspend! */ if (state != FBINFO_STATE_RUNNING) - flush_work(&dev_priv->fbdev_suspend_work); + flush_work(&dev_priv->display.fbdev.suspend_work); console_lock(); } else { @@ -645,7 +652,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous /* Don't block our own workqueue as this can * be run in parallel with other i915.ko tasks. 
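 *
 * [Editor's annotation, not part of the patch: on an asynchronous resume
 * the console_lock()-taking work is deferred to the queued suspend_work;
 * the worker, intel_fbdev_suspend_worker() shown above, then re-enters
 * this function synchronously with FBINFO_STATE_RUNNING from process
 * context.]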
*/ - schedule_work(&dev_priv->fbdev_suspend_work); + schedule_work(&dev_priv->display.fbdev.suspend_work); return; } } @@ -661,12 +668,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous drm_fb_helper_set_suspend(&ifbdev->helper, state); console_unlock(); +set_suspend: intel_fbdev_hpd_set_suspend(dev_priv, state); } void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; bool send_hpd; if (!ifbdev) @@ -685,7 +693,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_restore_mode(struct drm_device *dev) { - struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; if (!ifbdev) return; diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 67d2484afbaa..7f47e5c85c81 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -113,7 +113,7 @@ void intel_fdi_link_train(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state); + dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state); } /* units of 100MHz */ @@ -210,14 +210,14 @@ void intel_fdi_pll_freq_update(struct drm_i915_private *i915) u32 fdi_pll_clk = intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; - i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; + i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000; } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) { - i915->fdi_pll_freq = 270000; + i915->display.fdi.pll_freq = 270000; } else { return; } - drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq); + drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq); } int intel_fdi_link_freq(struct drm_i915_private *i915, @@ -226,7 +226,7 @@ int intel_fdi_link_freq(struct drm_i915_private *i915, if (HAS_DDI(i915)) return pipe_config->port_clock; /* SPLL */ else - return i915->fdi_pll_freq; + return i915->display.fdi.pll_freq; } int ilk_fdi_compute_config(struct intel_crtc *crtc, @@ -256,7 +256,7 @@ retry: pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, - link_bw, &pipe_config->fdi_m_n, false, false); + link_bw, &pipe_config->fdi_m_n, false); ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config); if (ret == -EDEADLK) @@ -789,7 +789,7 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); /* Enable the PCH Receiver FDI PLL */ - rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | + rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | FDI_RX_PLL_ENABLE | FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); @@ -1066,11 +1066,11 @@ void intel_fdi_init_hook(struct drm_i915_private *dev_priv) { if (IS_IRONLAKE(dev_priv)) { - dev_priv->fdi_funcs = &ilk_funcs; + dev_priv->display.funcs.fdi = &ilk_funcs; } else if (IS_SANDYBRIDGE(dev_priv)) { - dev_priv->fdi_funcs = &gen6_funcs; + dev_priv->display.funcs.fdi = &gen6_funcs; } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ - dev_priv->fdi_funcs = &ivb_funcs; + dev_priv->display.funcs.fdi = &ivb_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c 
b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 791248f812aa..d80e3e8a9b01 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -81,9 +81,9 @@ static void frontbuffer_flush(struct drm_i915_private *i915, enum fb_op_origin origin) { /* Delay flushing when rings are still busy.*/ - spin_lock(&i915->fb_tracking.lock); - frontbuffer_bits &= ~i915->fb_tracking.busy_bits; - spin_unlock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); + frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits; + spin_unlock(&i915->display.fb_tracking.lock); if (!frontbuffer_bits) return; @@ -111,11 +111,11 @@ static void frontbuffer_flush(struct drm_i915_private *i915, void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); - i915->fb_tracking.flip_bits |= frontbuffer_bits; + spin_lock(&i915->display.fb_tracking.lock); + i915->display.fb_tracking.flip_bits |= frontbuffer_bits; /* Remove stale busy bits due to the old buffer. */ - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } /** @@ -131,11 +131,11 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Mask any cancelled flips. */ - frontbuffer_bits &= i915->fb_tracking.flip_bits; - i915->fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + frontbuffer_bits &= i915->display.fb_tracking.flip_bits; + i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); if (frontbuffer_bits) frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); @@ -155,10 +155,10 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, void intel_frontbuffer_flip(struct drm_i915_private *i915, unsigned frontbuffer_bits) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Remove stale busy bits due to the old buffer. */ - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP); } @@ -170,10 +170,10 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, struct drm_i915_private *i915 = to_i915(front->obj->base.dev); if (origin == ORIGIN_CS) { - spin_lock(&i915->fb_tracking.lock); - i915->fb_tracking.busy_bits |= frontbuffer_bits; - i915->fb_tracking.flip_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); + i915->display.fb_tracking.busy_bits |= frontbuffer_bits; + i915->display.fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } trace_intel_frontbuffer_invalidate(frontbuffer_bits, origin); @@ -191,11 +191,11 @@ void __intel_fb_flush(struct intel_frontbuffer *front, struct drm_i915_private *i915 = to_i915(front->obj->base.dev); if (origin == ORIGIN_CS) { - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); /* Filter out new bits since rendering started. 
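 *
 * [Editor's annotation, not part of the patch: for ORIGIN_CS the
 * invalidate/flush pair brackets GPU rendering; "bits" below is a
 * placeholder for the plane's frontbuffer mask:
 *
 *   __intel_fb_invalidate(front, ORIGIN_CS, bits); // marks bits busy
 *   ... rendering ...
 *   __intel_fb_flush(front, ORIGIN_CS, bits);      // flushes only bits
 *                                                  // still marked busy
 *
 * so bits invalidated after this render started are left for a later
 * flush.]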
*/ - frontbuffer_bits &= i915->fb_tracking.busy_bits; - i915->fb_tracking.busy_bits &= ~frontbuffer_bits; - spin_unlock(&i915->fb_tracking.lock); + frontbuffer_bits &= i915->display.fb_tracking.busy_bits; + i915->display.fb_tracking.busy_bits &= ~frontbuffer_bits; + spin_unlock(&i915->display.fb_tracking.lock); } if (frontbuffer_bits) @@ -221,7 +221,7 @@ static void frontbuffer_retire(struct i915_active *ref) } static void frontbuffer_release(struct kref *ref) - __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock) + __releases(&to_i915(front->obj->base.dev)->display.fb_tracking.lock) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); @@ -238,7 +238,7 @@ static void frontbuffer_release(struct kref *ref) spin_unlock(&obj->vma.lock); RCU_INIT_POINTER(obj->frontbuffer, NULL); - spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); + spin_unlock(&to_i915(obj->base.dev)->display.fb_tracking.lock); i915_active_fini(&front->write); @@ -268,7 +268,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) frontbuffer_retire, I915_ACTIVE_RETIRE_SLEEPS); - spin_lock(&i915->fb_tracking.lock); + spin_lock(&i915->display.fb_tracking.lock); if (rcu_access_pointer(obj->frontbuffer)) { kfree(front); front = rcu_dereference_protected(obj->frontbuffer, true); @@ -277,7 +277,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) i915_gem_object_get(obj); rcu_assign_pointer(obj->frontbuffer, front); } - spin_unlock(&i915->fb_tracking.lock); + spin_unlock(&i915->display.fb_tracking.lock); return front; } @@ -286,7 +286,7 @@ void intel_frontbuffer_put(struct intel_frontbuffer *front) { kref_put_lock(&front->ref, frontbuffer_release, - &to_i915(front->obj->base.dev)->fb_tracking.lock); + &to_i915(front->obj->base.dev)->display.fb_tracking.lock); } /** @@ -311,6 +311,8 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old, */ BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > BITS_PER_TYPE(atomic_t)); + BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); + BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); if (old) { drm_WARN_ON(old->obj->base.dev, diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index ff0c37b079aa..3c474ed937fb 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -25,6 +25,7 @@ #define __INTEL_FRONTBUFFER_H__ #include <linux/atomic.h> +#include <linux/bits.h> #include <linux/kref.h> #include "gem/i915_gem_object_types.h" @@ -48,6 +49,23 @@ struct intel_frontbuffer { struct rcu_head rcu; }; +/* + * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is + * considered to be the frontbuffer for the given plane interface-wise. This + * doesn't mean that the hw necessarily already scans it out, but that any + * rendering (by the cpu or gpu) will land in the frontbuffer eventually. + * + * We have one bit per pipe and per scanout plane type. 
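+ *
+ * [Editor's annotation, not part of the patch: a worked example of the
+ * layout defined below, using raw pipe/plane numbers rather than the
+ * enum names:
+ *
+ *   INTEL_FRONTBUFFER(1, 2)        -> BIT(2 + 8 * 1) = BIT(10)
+ *   INTEL_FRONTBUFFER_OVERLAY(0)   -> BIT(8 - 1 + 0) = BIT(7)
+ *   INTEL_FRONTBUFFER_ALL_MASK(1)  -> GENMASK(15, 8) = 0xff00
+ *
+ * i.e. each pipe owns one byte of the mask, with the overlay in its
+ * top bit.]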
+ */ +#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 +#define INTEL_FRONTBUFFER(pipe, plane_id) \ + BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) +#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ + BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) +#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ + GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) + void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915, unsigned frontbuffer_bits); void intel_frontbuffer_flip_complete(struct drm_i915_private *i915, unsigned frontbuffer_bits); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index a6ba7fb72339..74443f57f62d 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -37,6 +37,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_gmbus.h" +#include "intel_gmbus_regs.h" struct intel_gmbus { struct i2c_adapter adapter; @@ -45,7 +46,7 @@ struct intel_gmbus { u32 reg0; i915_reg_t gpio_reg; struct i2c_algo_bit_data bit_algo; - struct drm_i915_private *dev_priv; + struct drm_i915_private *i915; }; struct gmbus_pin { @@ -116,6 +117,18 @@ static const struct gmbus_pin gmbus_pins_dg2[] = { [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, }; +static const struct gmbus_pin gmbus_pins_mtp[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_4_CNP] = { "dpd", GPIOE }, + [GMBUS_PIN_5_MTP] = { "dpe", GPIOF }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, + [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, + [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, + [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, +}; + static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, unsigned int pin) { @@ -128,6 +141,9 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *i915, } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { pins = gmbus_pins_dg1; size = ARRAY_SIZE(gmbus_pins_dg1); + } else if (INTEL_PCH_TYPE(i915) >= PCH_MTP) { + pins = gmbus_pins_mtp; + size = ARRAY_SIZE(gmbus_pins_mtp); } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { pins = gmbus_pins_icp; size = ARRAY_SIZE(gmbus_pins_icp); @@ -170,55 +186,55 @@ to_intel_gmbus(struct i2c_adapter *i2c) } void -intel_gmbus_reset(struct drm_i915_private *dev_priv) +intel_gmbus_reset(struct drm_i915_private *i915) { - intel_de_write(dev_priv, GMBUS0, 0); - intel_de_write(dev_priv, GMBUS4, 0); + intel_de_write(i915, GMBUS0(i915), 0); + intel_de_write(i915, GMBUS4(i915), 0); } -static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(i915, DSPCLK_GATE_D(i915)); if (!enable) val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(i915, DSPCLK_GATE_D(i915), val); } -static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void pch_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); + val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D); if (!enable) val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, 
SOUTH_DSPCLK_GATE_D, val); + intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val); } -static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv, +static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { u32 val; - val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4); + val = intel_de_read(i915, GEN9_CLKGATE_DIS_4); if (!enable) val |= BXT_GMBUS_GATING_DIS; else val &= ~BXT_GMBUS_GATING_DIS; - intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val); + intel_de_write(i915, GEN9_CLKGATE_DIS_4, val); } static u32 get_reserved(struct intel_gmbus *bus) { - struct drm_i915_private *i915 = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; struct intel_uncore *uncore = &i915->uncore; u32 reserved = 0; @@ -234,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus) static int get_clock(void *data) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); intel_uncore_write_notrace(uncore, @@ -249,7 +265,7 @@ static int get_clock(void *data) static int get_data(void *data) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); intel_uncore_write_notrace(uncore, @@ -264,7 +280,7 @@ static int get_data(void *data) static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); u32 clock_bits; @@ -283,7 +299,7 @@ static void set_clock(void *data, int state_high) static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; - struct intel_uncore *uncore = &bus->dev_priv->uncore; + struct intel_uncore *uncore = &bus->i915->uncore; u32 reserved = get_reserved(bus); u32 data_bits; @@ -301,12 +317,12 @@ static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(i915); - if (IS_PINEVIEW(dev_priv)) - pnv_gmbus_clock_gating(dev_priv, false); + if (IS_PINEVIEW(i915)) + pnv_gmbus_clock_gating(i915, false); set_data(bus, 1); set_clock(bus, 1); @@ -318,13 +334,13 @@ static void intel_gpio_post_xfer(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; set_data(bus, 1); set_clock(bus, 1); - if (IS_PINEVIEW(dev_priv)) - pnv_gmbus_clock_gating(dev_priv, true); + if (IS_PINEVIEW(i915)) + pnv_gmbus_clock_gating(i915, true); } static void @@ -356,7 +372,7 @@ static bool has_gmbus_irq(struct drm_i915_private *i915) return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); } -static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) +static int gmbus_wait(struct drm_i915_private *i915, u32 status, u32 irq_en) { DEFINE_WAIT(wait); u32 gmbus2; @@ -366,21 +382,21 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. 
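 *
 * [Editor's annotation, not part of the patch: callers pass the status
 * bit they want plus the matching interrupt enable, e.g. waiting for
 * the hw ready flag during a transfer:
 *
 *   ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
 *
 * and GMBUS_SATOER is OR'ed into the wait condition below so that a
 * slave NAK ends the wait with -ENXIO rather than a timeout.]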
*/ - if (!has_gmbus_irq(dev_priv)) + if (!has_gmbus_irq(i915)) irq_en = 0; - add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - intel_de_write_fw(dev_priv, GMBUS4, irq_en); + add_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + ret = wait_for_us((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 2); if (ret) - ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + ret = wait_for((gmbus2 = intel_de_read_fw(i915, GMBUS2(i915))) & status, 50); - intel_de_write_fw(dev_priv, GMBUS4, 0); - remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), 0); + remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) return -ENXIO; @@ -389,7 +405,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) } static int -gmbus_wait_idle(struct drm_i915_private *dev_priv) +gmbus_wait_idle(struct drm_i915_private *i915) { DEFINE_WAIT(wait); u32 irq_enable; @@ -397,35 +413,35 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! */ irq_enable = 0; - if (has_gmbus_irq(dev_priv)) + if (has_gmbus_irq(i915)) irq_enable = GMBUS_IDLE_EN; - add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - intel_de_write_fw(dev_priv, GMBUS4, irq_enable); + add_wait_queue(&i915->display.gmbus.wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), irq_enable); - ret = intel_wait_for_register_fw(&dev_priv->uncore, - GMBUS2, GMBUS_ACTIVE, 0, + ret = intel_wait_for_register_fw(&i915->uncore, + GMBUS2(i915), GMBUS_ACTIVE, 0, 10); - intel_de_write_fw(dev_priv, GMBUS4, 0); - remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); + intel_de_write_fw(i915, GMBUS4(i915), 0); + remove_wait_queue(&i915->display.gmbus.wait_queue, &wait); return ret; } -static unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) +static unsigned int gmbus_max_xfer_size(struct drm_i915_private *i915) { - return DISPLAY_VER(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : + return DISPLAY_VER(i915) >= 9 ? 
GEN9_GMBUS_BYTE_COUNT_MAX : GMBUS_BYTE_COUNT_MAX; } static int -gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, +gmbus_xfer_read_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus0_reg, u32 gmbus1_index) { unsigned int size = len; - bool burst_read = len > gmbus_max_xfer_size(dev_priv); + bool burst_read = len > gmbus_max_xfer_size(i915); bool extra_byte_added = false; if (burst_read) { @@ -438,21 +454,21 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, len++; } size = len % 256 + 256; - intel_de_write_fw(dev_priv, GMBUS0, + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } - intel_de_write_fw(dev_priv, GMBUS1, + intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; - ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; - val = intel_de_read_fw(dev_priv, GMBUS3); + val = intel_de_read_fw(i915, GMBUS3(i915)); do { if (extra_byte_added && len == 1) break; @@ -463,7 +479,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, if (burst_read && len == size - 4) /* Reset the override bit */ - intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg); + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_reg); } return 0; @@ -480,7 +496,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, #define INTEL_GMBUS_BURST_READ_MAX_LEN 767U static int -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, +gmbus_xfer_read(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus0_reg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -489,12 +505,12 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - if (HAS_GMBUS_BURST_READ(dev_priv)) + if (HAS_GMBUS_BURST_READ(i915)) len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); else - len = min(rx_size, gmbus_max_xfer_size(dev_priv)); + len = min(rx_size, gmbus_max_xfer_size(i915)); - ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len, + ret = gmbus_xfer_read_chunk(i915, msg->addr, buf, len, gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -507,7 +523,7 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, } static int -gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, +gmbus_xfer_write_chunk(struct drm_i915_private *i915, unsigned short addr, u8 *buf, unsigned int len, u32 gmbus1_index) { @@ -520,8 +536,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, len -= 1; } - intel_de_write_fw(dev_priv, GMBUS3, val); - intel_de_write_fw(dev_priv, GMBUS1, + intel_de_write_fw(i915, GMBUS3(i915), val); + intel_de_write_fw(i915, GMBUS1(i915), gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -531,9 +547,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); - intel_de_write_fw(dev_priv, GMBUS3, val); + intel_de_write_fw(i915, GMBUS3(i915), val); - ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); + ret = gmbus_wait(i915, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) return ret; } @@ -542,7 +558,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, } static int -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 
+gmbus_xfer_write(struct drm_i915_private *i915, struct i2c_msg *msg, u32 gmbus1_index) { u8 *buf = msg->buf; @@ -551,9 +567,9 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(tx_size, gmbus_max_xfer_size(dev_priv)); + len = min(tx_size, gmbus_max_xfer_size(i915)); - ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, + ret = gmbus_xfer_write_chunk(i915, msg->addr, buf, len, gmbus1_index); if (ret) return ret; @@ -580,7 +596,7 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, +gmbus_index_xfer(struct drm_i915_private *i915, struct i2c_msg *msgs, u32 gmbus0_reg) { u32 gmbus1_index = 0; @@ -596,17 +612,17 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, /* GMBUS5 holds 16-bit index */ if (gmbus5) - intel_de_write_fw(dev_priv, GMBUS5, gmbus5); + intel_de_write_fw(i915, GMBUS5(i915), gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg, + ret = gmbus_xfer_read(i915, &msgs[1], gmbus0_reg, gmbus1_index); else - ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); + ret = gmbus_xfer_write(i915, &msgs[1], gmbus1_index); /* Clear GMBUS5 after each index transfer */ if (gmbus5) - intel_de_write_fw(dev_priv, GMBUS5, 0); + intel_de_write_fw(i915, GMBUS5(i915), 0); return ret; } @@ -616,34 +632,34 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num, u32 gmbus0_source) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; int i = 0, inc, try = 0; int ret = 0; /* Display WA #0868: skl,bxt,kbl,cfl,glk */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_gmbus_clock_gating(dev_priv, false); - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv)) - pch_gmbus_clock_gating(dev_priv, false); + if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + bxt_gmbus_clock_gating(i915, false); + else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) + pch_gmbus_clock_gating(i915, false); retry: - intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0); + intel_de_write_fw(i915, GMBUS0(i915), gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(dev_priv, &msgs[i], + ret = gmbus_index_xfer(i915, &msgs[i], gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { - ret = gmbus_xfer_read(dev_priv, &msgs[i], + ret = gmbus_xfer_read(i915, &msgs[i], gmbus0_source | bus->reg0, 0); } else { - ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); + ret = gmbus_xfer_write(i915, &msgs[i], 0); } if (!ret) - ret = gmbus_wait(dev_priv, + ret = gmbus_wait(i915, GMBUS_HW_WAIT_PHASE, GMBUS_HW_WAIT_EN); if (ret == -ETIMEDOUT) goto timeout; @@ -655,19 +671,19 @@ retry: * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ - intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + intel_de_write_fw(i915, GMBUS1(i915), GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. 
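 *
 * [Editor's annotation, not part of the patch: "let it sleep" is the
 * GMBUS0 write just below: after gmbus_wait_idle() the clock/port
 * select register is cleared to 0, and the next transfer re-programs
 * it at the top of do_gmbus_xfer().]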
*/ - if (gmbus_wait_idle(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, + if (gmbus_wait_idle(i915)) { + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out waiting for idle\n", adapter->name); ret = -ETIMEDOUT; } - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); ret = ret ?: i; goto out; @@ -686,8 +702,8 @@ clear_err: * it's slow responding and only answers on the 2nd retry. */ ret = -ENXIO; - if (gmbus_wait_idle(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, + if (gmbus_wait_idle(i915)) { + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out after NAK\n", adapter->name); ret = -ETIMEDOUT; @@ -697,11 +713,11 @@ clear_err: * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ - intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT); - intel_de_write_fw(dev_priv, GMBUS1, 0); - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS1(i915), GMBUS_SW_CLR_INT); + intel_de_write_fw(i915, GMBUS1(i915), 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); - drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", + drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); @@ -712,7 +728,7 @@ clear_err: * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "GMBUS [%s] NAK on first message, retry\n", adapter->name); goto retry; @@ -721,10 +737,10 @@ clear_err: goto out; timeout: - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", bus->adapter.name, bus->reg0 & 0xff); - intel_de_write_fw(dev_priv, GMBUS0, 0); + intel_de_write_fw(i915, GMBUS0(i915), 0); /* * Hardware may not support GMBUS over these pins? 
Try GPIO bitbanging @@ -734,10 +750,10 @@ timeout: out: /* Display WA #0868: skl,bxt,kbl,cfl,glk */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_gmbus_clock_gating(dev_priv, true); - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv)) - pch_gmbus_clock_gating(dev_priv, true); + if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) + bxt_gmbus_clock_gating(i915, true); + else if (HAS_PCH_SPT(i915) || HAS_PCH_CNP(i915)) + pch_gmbus_clock_gating(i915, true); return ret; } @@ -746,11 +762,11 @@ static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; intel_wakeref_t wakeref; int ret; - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); @@ -762,7 +778,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) bus->force_bit |= GMBUS_FORCE_BIT_RETRY; } - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } @@ -770,7 +786,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) int intel_gmbus_output_aksv(struct i2c_adapter *adapter) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; u8 cmd = DRM_HDCP_DDC_AKSV; u8 buf[DRM_HDCP_KSV_LEN] = { 0 }; struct i2c_msg msgs[] = { @@ -790,8 +806,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) intel_wakeref_t wakeref; int ret; - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - mutex_lock(&dev_priv->gmbus_mutex); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_GMBUS); + mutex_lock(&i915->display.gmbus.mutex); /* * In order to output Aksv to the receiver, use an indexed write to @@ -800,8 +816,8 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter) */ ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT); - mutex_unlock(&dev_priv->gmbus_mutex); - intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref); + mutex_unlock(&i915->display.gmbus.mutex); + intel_display_power_put(i915, POWER_DOMAIN_GMBUS, wakeref); return ret; } @@ -824,27 +840,27 @@ static void gmbus_lock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&i915->display.gmbus.mutex); } static int gmbus_trylock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - return mutex_trylock(&dev_priv->gmbus_mutex); + return mutex_trylock(&i915->display.gmbus.mutex); } static void gmbus_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&i915->display.gmbus.mutex); } static const struct i2c_lock_operations gmbus_lock_ops = { @@ -855,31 +871,31 @@ static const struct i2c_lock_operations gmbus_lock_ops = { /** * intel_gmbus_setup - 
instantiate all Intel i2c GMBuses - * @dev_priv: i915 device private + * @i915: i915 device private */ -int intel_gmbus_setup(struct drm_i915_private *dev_priv) +int intel_gmbus_setup(struct drm_i915_private *i915) { - struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); unsigned int pin; int ret; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; - else if (!HAS_GMCH(dev_priv)) + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + i915->display.gmbus.mmio_base = VLV_DISPLAY_BASE; + else if (!HAS_GMCH(i915)) /* * Broxton uses the same PCH offsets for South Display Engine, * even though it doesn't have a PCH. */ - dev_priv->gpio_mmio_base = PCH_DISPLAY_BASE; + i915->display.gmbus.mmio_base = PCH_DISPLAY_BASE; - mutex_init(&dev_priv->gmbus_mutex); - init_waitqueue_head(&dev_priv->gmbus_wait_queue); + mutex_init(&i915->display.gmbus.mutex); + init_waitqueue_head(&i915->display.gmbus.wait_queue); - for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { const struct gmbus_pin *gmbus_pin; struct intel_gmbus *bus; - gmbus_pin = get_gmbus_pin(dev_priv, pin); + gmbus_pin = get_gmbus_pin(i915, pin); if (!gmbus_pin) continue; @@ -896,7 +912,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) "i915 gmbus %s", gmbus_pin->name); bus->adapter.dev.parent = &pdev->dev; - bus->dev_priv = dev_priv; + bus->i915 = i915; bus->adapter.algo = &gmbus_algorithm; bus->adapter.lock_ops = &gmbus_lock_ops; @@ -911,10 +927,10 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) bus->reg0 = pin | GMBUS_RATE_100KHZ; /* gmbus seems to be broken on i830 */ - if (IS_I830(dev_priv)) + if (IS_I830(i915)) bus->force_bit = 1; - intel_gpio_setup(bus, GPIO(gmbus_pin->gpio)); + intel_gpio_setup(bus, GPIO(i915, gmbus_pin->gpio)); ret = i2c_add_adapter(&bus->adapter); if (ret) { @@ -922,43 +938,43 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) goto err; } - dev_priv->gmbus[pin] = bus; + i915->display.gmbus.bus[pin] = bus; } - intel_gmbus_reset(dev_priv); + intel_gmbus_reset(i915); return 0; err: - intel_gmbus_teardown(dev_priv); + intel_gmbus_teardown(i915); return ret; } -struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, +struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *i915, unsigned int pin) { - if (drm_WARN_ON(&dev_priv->drm, pin >= ARRAY_SIZE(dev_priv->gmbus) || - !dev_priv->gmbus[pin])) + if (drm_WARN_ON(&i915->drm, pin >= ARRAY_SIZE(i915->display.gmbus.bus) || + !i915->display.gmbus.bus[pin])) return NULL; - return &dev_priv->gmbus[pin]->adapter; + return &i915->display.gmbus.bus[pin]->adapter; } void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->i915; - mutex_lock(&dev_priv->gmbus_mutex); + mutex_lock(&i915->display.gmbus.mutex); bus->force_bit += force_bit ? 1 : -1; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "%sabling bit-banging on %s. force bit now %d\n", force_bit ? 
"en" : "dis", adapter->name, bus->force_bit); - mutex_unlock(&dev_priv->gmbus_mutex); + mutex_unlock(&i915->display.gmbus.mutex); } bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) @@ -968,20 +984,20 @@ bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) return bus->force_bit; } -void intel_gmbus_teardown(struct drm_i915_private *dev_priv) +void intel_gmbus_teardown(struct drm_i915_private *i915) { unsigned int pin; - for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) { + for (pin = 0; pin < ARRAY_SIZE(i915->display.gmbus.bus); pin++) { struct intel_gmbus *bus; - bus = dev_priv->gmbus[pin]; + bus = i915->display.gmbus.bus[pin]; if (!bus) continue; i2c_del_adapter(&bus->adapter); kfree(bus); - dev_priv->gmbus[pin] = NULL; + i915->display.gmbus.bus[pin] = NULL; } } diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index 8edc2e99cf53..20f704bd4e70 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -24,6 +24,7 @@ struct i2c_adapter; #define GMBUS_PIN_2_BXT 2 #define GMBUS_PIN_3_BXT 3 #define GMBUS_PIN_4_CNP 4 +#define GMBUS_PIN_5_MTP 5 #define GMBUS_PIN_9_TC1_ICP 9 #define GMBUS_PIN_10_TC2_ICP 10 #define GMBUS_PIN_11_TC3_ICP 11 diff --git a/drivers/gpu/drm/i915/display/intel_gmbus_regs.h b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h new file mode 100644 index 000000000000..53aacbda983c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_gmbus_regs.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_GMBUS_REGS_H__ +#define __INTEL_GMBUS_REGS_H__ + +#include "i915_reg_defs.h" + +#define GMBUS_MMIO_BASE(__i915) ((__i915)->display.gmbus.mmio_base) + +#define GPIO(__i915, gpio) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5010 + 4 * (gpio)) +#define GPIO_CLOCK_DIR_MASK (1 << 0) +#define GPIO_CLOCK_DIR_IN (0 << 1) +#define GPIO_CLOCK_DIR_OUT (1 << 1) +#define GPIO_CLOCK_VAL_MASK (1 << 2) +#define GPIO_CLOCK_VAL_OUT (1 << 3) +#define GPIO_CLOCK_VAL_IN (1 << 4) +#define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) +#define GPIO_DATA_DIR_MASK (1 << 8) +#define GPIO_DATA_DIR_IN (0 << 9) +#define GPIO_DATA_DIR_OUT (1 << 9) +#define GPIO_DATA_VAL_MASK (1 << 10) +#define GPIO_DATA_VAL_OUT (1 << 11) +#define GPIO_DATA_VAL_IN (1 << 12) +#define GPIO_DATA_PULLUP_DISABLE (1 << 13) + +/* clock/port select */ +#define GMBUS0(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5100) +#define GMBUS_AKSV_SELECT (1 << 11) +#define GMBUS_RATE_100KHZ (0 << 8) +#define GMBUS_RATE_50KHZ (1 << 8) +#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) + +/* command/status */ +#define GMBUS1(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5104) +#define GMBUS_SW_CLR_INT (1 << 31) +#define GMBUS_SW_RDY (1 << 30) +#define GMBUS_ENT (1 << 29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0 << 25) +#define GMBUS_CYCLE_WAIT (1 << 25) +#define GMBUS_CYCLE_INDEX (2 << 25) +#define GMBUS_CYCLE_STOP (4 << 25) +#define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_BYTE_COUNT_MAX 256U +#define GEN9_GMBUS_BYTE_COUNT_MAX 511U +#define GMBUS_SLAVE_INDEX_SHIFT 8 +#define GMBUS_SLAVE_ADDR_SHIFT 1 +#define GMBUS_SLAVE_READ (1 << 0) +#define GMBUS_SLAVE_WRITE (0 << 0) + +/* status */ +#define GMBUS2(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5108) +#define GMBUS_INUSE (1 << 15) +#define 
GMBUS_HW_WAIT_PHASE (1 << 14) +#define GMBUS_STALL_TIMEOUT (1 << 13) +#define GMBUS_INT (1 << 12) +#define GMBUS_HW_RDY (1 << 11) +#define GMBUS_SATOER (1 << 10) +#define GMBUS_ACTIVE (1 << 9) + +/* data buffer bytes 3-0 */ +#define GMBUS3(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x510c) + +/* interrupt mask (Pineview+) */ +#define GMBUS4(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5110) +#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) +#define GMBUS_NAK_EN (1 << 3) +#define GMBUS_IDLE_EN (1 << 2) +#define GMBUS_HW_WAIT_EN (1 << 1) +#define GMBUS_HW_RDY_EN (1 << 0) + +/* byte index */ +#define GMBUS5(__i915) _MMIO(GMBUS_MMIO_BASE(__i915) + 0x5120) +#define GMBUS_2BYTE_INDEX_EN (1 << 31) + +#endif /* __INTEL_GMBUS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 7dbc9f0bb24f..6406fd487ee5 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -23,6 +23,7 @@ #include "intel_display_power_well.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" #include "intel_pcode.h" #define KEY_LOAD_TRIES 5 @@ -209,12 +210,12 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return false; /* MEI interface is solid */ - mutex_lock(&dev_priv->hdcp_comp_mutex); - if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) { + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return false; } - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); /* Sink's capability for HDCP2.2 */ hdcp->shim->hdcp_2_2_capable(dig_port, &capable); @@ -1131,8 +1132,8 @@ static void intel_hdcp_prop_work(struct work_struct *work) bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { - return INTEL_INFO(dev_priv)->display.has_hdcp && - (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E); + return RUNTIME_INFO(dev_priv)->has_hdcp && + (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E); } static int @@ -1145,11 +1146,11 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1157,7 +1158,7 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, if (ret) drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1175,11 +1176,11 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1189,7 +1190,7 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1203,18 +1204,18 @@ static int hdcp2_verify_hprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1229,11 +1230,11 @@ hdcp2_store_pairing_info(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1241,7 +1242,7 @@ hdcp2_store_pairing_info(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1256,11 +1257,11 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1268,7 +1269,7 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1283,11 +1284,11 @@ hdcp2_verify_lprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1295,7 +1296,7 @@ hdcp2_verify_lprime(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1309,11 +1310,11 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1321,7 +1322,7 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Get session key failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1338,11 +1339,11 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1352,7 +1353,7 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify rep topology failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1367,18 +1368,18 @@ hdcp2_verify_mprime(struct intel_connector *connector, struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1391,11 +1392,11 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } @@ -1403,7 +1404,7 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) if (ret < 0) drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. 
%d\n", ret); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -1415,17 +1416,17 @@ static int hdcp2_close_mei_session(struct intel_connector *connector) struct i915_hdcp_comp_master *comp; int ret; - mutex_lock(&dev_priv->hdcp_comp_mutex); - comp = dev_priv->hdcp_master; + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + comp = dev_priv->display.hdcp.master; if (!comp || !comp->ops) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return -EINVAL; } ret = comp->ops->close_hdcp_session(comp->mei_dev, &dig_port->hdcp_port_data); - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return ret; } @@ -2143,10 +2144,10 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; - dev_priv->hdcp_master->mei_dev = mei_kdev; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data; + dev_priv->display.hdcp.master->mei_dev = mei_kdev; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return 0; } @@ -2157,9 +2158,9 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n"); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_master = NULL; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.master = NULL; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); } static const struct component_ops i915_hdcp_component_ops = { @@ -2250,19 +2251,19 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) if (!is_hdcp2_supported(dev_priv)) return; - mutex_lock(&dev_priv->hdcp_comp_mutex); - drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added); - dev_priv->hdcp_comp_added = true; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + dev_priv->display.hdcp.comp_added = true; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, I915_COMPONENT_HDCP); if (ret < 0) { drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n", ret); - mutex_lock(&dev_priv->hdcp_comp_mutex); - dev_priv->hdcp_comp_added = false; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + dev_priv->display.hdcp.comp_added = false; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return; } } @@ -2475,14 +2476,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) { - mutex_lock(&dev_priv->hdcp_comp_mutex); - if (!dev_priv->hdcp_comp_added) { - mutex_unlock(&dev_priv->hdcp_comp_mutex); + mutex_lock(&dev_priv->display.hdcp.comp_mutex); + if (!dev_priv->display.hdcp.comp_added) { + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); return; } - dev_priv->hdcp_comp_added = false; - mutex_unlock(&dev_priv->hdcp_comp_mutex); + dev_priv->display.hdcp.comp_added = false; + mutex_unlock(&dev_priv->display.hdcp.comp_mutex); component_del(dev_priv->drm.dev, 
&i915_hdcp_component_ops); } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h new file mode 100644 index 000000000000..2a3733e8966c --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_HDCP_REGS_H__ +#define __INTEL_HDCP_REGS_H__ + +#include "i915_reg_defs.h" + +/* HDCP Key Registers */ +#define HDCP_KEY_CONF _MMIO(0x66c00) +#define HDCP_AKSV_SEND_TRIGGER REG_BIT(31) +#define HDCP_CLEAR_KEYS_TRIGGER REG_BIT(30) +#define HDCP_KEY_LOAD_TRIGGER REG_BIT(8) +#define HDCP_KEY_STATUS _MMIO(0x66c04) +#define HDCP_FUSE_IN_PROGRESS REG_BIT(7) +#define HDCP_FUSE_ERROR REG_BIT(6) +#define HDCP_FUSE_DONE REG_BIT(5) +#define HDCP_KEY_LOAD_STATUS REG_BIT(1) +#define HDCP_KEY_LOAD_DONE REG_BIT(0) +#define HDCP_AKSV_LO _MMIO(0x66c10) +#define HDCP_AKSV_HI _MMIO(0x66c14) + +/* HDCP Repeater Registers */ +#define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_TRANSA_REP_PRESENT REG_BIT(31) +#define HDCP_TRANSB_REP_PRESENT REG_BIT(30) +#define HDCP_TRANSC_REP_PRESENT REG_BIT(29) +#define HDCP_TRANSD_REP_PRESENT REG_BIT(28) +#define HDCP_DDIB_REP_PRESENT REG_BIT(30) +#define HDCP_DDIA_REP_PRESENT REG_BIT(29) +#define HDCP_DDIC_REP_PRESENT REG_BIT(28) +#define HDCP_DDID_REP_PRESENT REG_BIT(27) +#define HDCP_DDIF_REP_PRESENT REG_BIT(26) +#define HDCP_DDIE_REP_PRESENT REG_BIT(25) +#define HDCP_TRANSA_SHA1_M0 (1 << 20) +#define HDCP_TRANSB_SHA1_M0 (2 << 20) +#define HDCP_TRANSC_SHA1_M0 (3 << 20) +#define HDCP_TRANSD_SHA1_M0 (4 << 20) +#define HDCP_DDIB_SHA1_M0 (1 << 20) +#define HDCP_DDIA_SHA1_M0 (2 << 20) +#define HDCP_DDIC_SHA1_M0 (3 << 20) +#define HDCP_DDID_SHA1_M0 (4 << 20) +#define HDCP_DDIF_SHA1_M0 (5 << 20) +#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? */ +#define HDCP_SHA1_BUSY REG_BIT(16) +#define HDCP_SHA1_READY REG_BIT(17) +#define HDCP_SHA1_COMPLETE REG_BIT(18) +#define HDCP_SHA1_V_MATCH REG_BIT(19) +#define HDCP_SHA1_TEXT_32 (1 << 1) +#define HDCP_SHA1_COMPLETE_HASH (2 << 1) +#define HDCP_SHA1_TEXT_24 (4 << 1) +#define HDCP_SHA1_TEXT_16 (5 << 1) +#define HDCP_SHA1_TEXT_8 (6 << 1) +#define HDCP_SHA1_TEXT_0 (7 << 1) +#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) +#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) +#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) +#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) +#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) +#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) +#define HDCP_SHA_TEXT _MMIO(0x66d18) + +/* HDCP Auth Registers */ +#define _PORTA_HDCP_AUTHENC 0x66800 +#define _PORTB_HDCP_AUTHENC 0x66500 +#define _PORTC_HDCP_AUTHENC 0x66600 +#define _PORTD_HDCP_AUTHENC 0x66700 +#define _PORTE_HDCP_AUTHENC 0x66A00 +#define _PORTF_HDCP_AUTHENC 0x66900 +#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ + _PORTA_HDCP_AUTHENC, \ + _PORTB_HDCP_AUTHENC, \ + _PORTC_HDCP_AUTHENC, \ + _PORTD_HDCP_AUTHENC, \ + _PORTE_HDCP_AUTHENC, \ + _PORTF_HDCP_AUTHENC) + (x)) +#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define _TRANSA_HDCP_CONF 0x66400 +#define _TRANSB_HDCP_CONF 0x66500 +#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ + _TRANSB_HDCP_CONF) +#define HDCP_CONF(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP_CONF(trans) : \ + PORT_HDCP_CONF(port)) + +#define HDCP_CONF_CAPTURE_AN REG_BIT(0) +#define HDCP_CONF_AUTH_AND_ENC (REG_BIT(1) | REG_BIT(0)) +#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define _TRANSA_HDCP_ANINIT 0x66404 +#define _TRANSB_HDCP_ANINIT 0x66504 +#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_ANINIT, \ + _TRANSB_HDCP_ANINIT) +#define HDCP_ANINIT(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANINIT(trans) : \ + PORT_HDCP_ANINIT(port)) + +#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define _TRANSA_HDCP_ANLO 0x66408 +#define _TRANSB_HDCP_ANLO 0x66508 +#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ + _TRANSB_HDCP_ANLO) +#define HDCP_ANLO(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANLO(trans) : \ + PORT_HDCP_ANLO(port)) + +#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define _TRANSA_HDCP_ANHI 0x6640C +#define _TRANSB_HDCP_ANHI 0x6650C +#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ + _TRANSB_HDCP_ANHI) +#define HDCP_ANHI(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_ANHI(trans) : \ + PORT_HDCP_ANHI(port)) + +#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define _TRANSA_HDCP_BKSVLO 0x66410 +#define _TRANSB_HDCP_BKSVLO 0x66510 +#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVLO, \ + _TRANSB_HDCP_BKSVLO) +#define HDCP_BKSVLO(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVLO(trans) : \ + PORT_HDCP_BKSVLO(port)) + +#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define _TRANSA_HDCP_BKSVHI 0x66414 +#define _TRANSB_HDCP_BKSVHI 0x66514 +#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVHI, \ + _TRANSB_HDCP_BKSVHI) +#define HDCP_BKSVHI(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVHI(trans) : \ + PORT_HDCP_BKSVHI(port)) + +#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define _TRANSA_HDCP_RPRIME 0x66418 +#define _TRANSB_HDCP_RPRIME 0x66518 +#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_RPRIME, \ + _TRANSB_HDCP_RPRIME) +#define HDCP_RPRIME(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP_RPRIME(trans) : \ + PORT_HDCP_RPRIME(port)) + +#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define _TRANSA_HDCP_STATUS 0x6641C +#define _TRANSB_HDCP_STATUS 0x6651C +#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_STATUS, \ + _TRANSB_HDCP_STATUS) +#define HDCP_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP_STATUS(trans) : \ + PORT_HDCP_STATUS(port)) + +#define HDCP_STATUS_STREAM_A_ENC REG_BIT(31) +#define HDCP_STATUS_STREAM_B_ENC REG_BIT(30) +#define HDCP_STATUS_STREAM_C_ENC REG_BIT(29) +#define HDCP_STATUS_STREAM_D_ENC REG_BIT(28) +#define HDCP_STATUS_AUTH REG_BIT(21) +#define HDCP_STATUS_ENC REG_BIT(20) +#define HDCP_STATUS_RI_MATCH REG_BIT(19) +#define HDCP_STATUS_R0_READY REG_BIT(18) +#define HDCP_STATUS_AN_READY REG_BIT(17) +#define HDCP_STATUS_CIPHER REG_BIT(16) +#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) + +/* HDCP2.2 Registers */ +#define _PORTA_HDCP2_BASE 0x66800 +#define _PORTB_HDCP2_BASE 0x66500 +#define _PORTC_HDCP2_BASE 0x66600 +#define _PORTD_HDCP2_BASE 0x66700 +#define _PORTE_HDCP2_BASE 0x66A00 +#define _PORTF_HDCP2_BASE 0x66900 +#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ + _PORTA_HDCP2_BASE, \ + _PORTB_HDCP2_BASE, \ + _PORTC_HDCP2_BASE, \ + _PORTD_HDCP2_BASE, \ + _PORTE_HDCP2_BASE, \ + _PORTF_HDCP2_BASE) + (x)) + +#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) +#define _TRANSA_HDCP2_AUTH 0x66498 +#define _TRANSB_HDCP2_AUTH 0x66598 +#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ + _TRANSB_HDCP2_AUTH) +#define AUTH_LINK_AUTHENTICATED REG_BIT(31) +#define AUTH_LINK_TYPE REG_BIT(30) +#define AUTH_FORCE_CLR_INPUTCTR REG_BIT(19) +#define AUTH_CLR_KEYS REG_BIT(18) +#define HDCP2_AUTH(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH(trans) : \ + PORT_HDCP2_AUTH(port)) + +#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) +#define _TRANSA_HDCP2_CTL 0x664B0 +#define _TRANSB_HDCP2_CTL 0x665B0 +#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ + _TRANSB_HDCP2_CTL) +#define CTL_LINK_ENCRYPTION_REQ REG_BIT(31) +#define HDCP2_CTL(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_CTL(trans) : \ + PORT_HDCP2_CTL(port)) + +#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) +#define _TRANSA_HDCP2_STATUS 0x664B4 +#define _TRANSB_HDCP2_STATUS 0x665B4 +#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STATUS, \ + _TRANSB_HDCP2_STATUS) +#define LINK_TYPE_STATUS REG_BIT(22) +#define LINK_AUTH_STATUS REG_BIT(21) +#define LINK_ENCRYPTION_STATUS REG_BIT(20) +#define HDCP2_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_STATUS(trans) : \ + PORT_HDCP2_STATUS(port)) + +#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0 +#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0 +#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0 +#define _PIPED_HDCP2_STREAM_STATUS 0x667C0 +#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \ + _PIPEA_HDCP2_STREAM_STATUS, \ + _PIPEB_HDCP2_STREAM_STATUS, \ + _PIPEC_HDCP2_STREAM_STATUS, \ + _PIPED_HDCP2_STREAM_STATUS)) + +#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0 +#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0 +#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STREAM_STATUS, \ + _TRANSB_HDCP2_STREAM_STATUS) +#define STREAM_ENCRYPTION_STATUS REG_BIT(31) +#define STREAM_TYPE_STATUS REG_BIT(30) +#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? 
\ + TRANS_HDCP2_STREAM_STATUS(trans) : \ + PIPE_HDCP2_STREAM_STATUS(port)) + +#define _PORTA_HDCP2_AUTH_STREAM 0x66F00 +#define _PORTB_HDCP2_AUTH_STREAM 0x66F04 +#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \ + _PORTA_HDCP2_AUTH_STREAM, \ + _PORTB_HDCP2_AUTH_STREAM) +#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00 +#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04 +#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_AUTH_STREAM, \ + _TRANSB_HDCP2_AUTH_STREAM) +#define AUTH_STREAM_TYPE REG_BIT(31) +#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ + (GRAPHICS_VER(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH_STREAM(trans) : \ + PORT_HDCP2_AUTH_STREAM(port)) + +#endif /* __INTEL_HDCP_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index ebd91aa69dd2..7816b2a33fee 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -50,6 +50,7 @@ #include "intel_dp.h" #include "intel_gmbus.h" #include "intel_hdcp.h" +#include "intel_hdcp_regs.h" #include "intel_hdmi.h" #include "intel_lspcon.h" #include "intel_panel.h" @@ -1891,7 +1892,7 @@ int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) * 1.5x for 12bpc * 1.25x for 10bpc */ - return clock * bpc / 8; + return DIV_ROUND_CLOSEST(clock * bpc, 8); } static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) @@ -2001,6 +2002,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector, clock *= 2; } + /* + * HDMI2.1 requires higher resolution modes like 8K60, 4K120 to be + * enumerated only if FRL is supported. Current platforms do not support + * FRL, so prune the higher resolution modes that require a dotclock of + * more than 600 MHz. + */ + if (clock > 600000) + return MODE_CLOCK_HIGH; + ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode); status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 5f8b4f481cff..f7a2f485b177 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -119,13 +119,13 @@ intel_connector_hpd_pin(struct intel_connector *connector) * responsible for further action. * * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is - * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to + * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and * short IRQs count as +1. If this threshold is exceeded, it's considered an * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. * * By default, most systems will only count long IRQs towards - * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also + * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also * suffer from short IRQ storms and must also track these. Because short IRQ * storms are naturally caused by sideband interactions with DP MST devices, * short IRQ detection is only enabled for systems without DP MST support. 
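The storm detection described in the comment above is a weighted counter over a sliding time window: each IRQ adds its weight (10 for long, 1 for short), the window restarts once it expires, and exceeding the threshold marks the pin as storming. A minimal standalone sketch of that accounting, assuming the driver's HPD_STORM_DETECT_PERIOD (window length in ms) is in scope; the hpd_storm_model/hpd_irq_storm names are hypothetical, not the driver's actual intel_hpd_irq_storm_detect() shown in the hunks below:

#include <linux/jiffies.h>

struct hpd_storm_model {
	unsigned long last_jiffies;	/* start of the current detect window */
	int count;			/* weighted IRQ count within the window */
	int threshold;			/* 0 disables detection entirely */
};

/* Returns true when this IRQ pushes the weighted count past the threshold. */
static bool hpd_irq_storm(struct hpd_storm_model *m, bool long_hpd)
{
	unsigned long start = m->last_jiffies;
	/* HPD_STORM_DETECT_PERIOD: window length in ms, per the driver */
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;	/* long IRQs weigh 10x */

	if (!m->threshold)
		return false;

	if (!time_in_range(jiffies, start, end)) {
		/* window expired: restart accounting from this IRQ */
		m->last_jiffies = jiffies;
		m->count = 0;
	}

	m->count += increment;
	return m->count > m->threshold;
}

With a threshold of 50, for example, five long IRQs inside one window reach exactly 50 and the sixth trips detection; the caller then sets the pin to HPD_MARK_DISABLED and falls back to polling, as the following hunks show.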
@@ -140,7 +140,7 @@ intel_connector_hpd_pin(struct intel_connector *connector) static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, enum hpd_pin pin, bool long_hpd) { - struct i915_hotplug *hpd = &dev_priv->hotplug; + struct intel_hotplug *hpd = &dev_priv->display.hotplug; unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); const int increment = long_hpd ? 10 : 1; @@ -148,7 +148,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, bool storm = false; if (!threshold || - (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled)) + (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) return false; if (!time_in_range(jiffies, start, end)) { @@ -191,7 +191,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) + dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; drm_info(&dev_priv->drm, @@ -199,7 +199,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) "switching from hotplug detection to polling\n", connector->base.name); - dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; @@ -209,7 +209,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, + mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } @@ -218,7 +218,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), - hotplug.reenable_work.work); + display.hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -233,7 +233,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) for_each_intel_connector_iter(connector, &conn_iter) { pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->hotplug.stats[pin].state != HPD_DISABLED) + dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) continue; if (connector->base.polled != connector->polled) @@ -245,8 +245,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); for_each_hpd_pin(pin) { - if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) - dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) + dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; } intel_hpd_irq_setup(dev_priv); @@ -297,16 +297,16 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.dig_port_work); + container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); u32 long_port_mask, short_port_mask; struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); - long_port_mask = dev_priv->hotplug.long_port_mask; - 
dev_priv->hotplug.long_port_mask = 0; - short_port_mask = dev_priv->hotplug.short_port_mask; - dev_priv->hotplug.short_port_mask = 0; + long_port_mask = dev_priv->display.hotplug.long_port_mask; + dev_priv->display.hotplug.long_port_mask = 0; + short_port_mask = dev_priv->display.hotplug.short_port_mask; + dev_priv->display.hotplug.short_port_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); for_each_intel_encoder(&dev_priv->drm, encoder) { @@ -335,9 +335,9 @@ static void i915_digport_work_func(struct work_struct *work) if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.event_bits |= old_bits; + dev_priv->display.hotplug.event_bits |= old_bits; spin_unlock_irq(&dev_priv->irq_lock); - queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0); } } @@ -353,10 +353,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); spin_lock_irq(&i915->irq_lock); - i915->hotplug.short_port_mask |= BIT(dig_port->base.port); + i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); spin_unlock_irq(&i915->irq_lock); - queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work); + queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); } /* @@ -366,7 +366,7 @@ static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - hotplug.hotplug_work.work); + display.hotplug.hotplug_work.work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -379,10 +379,10 @@ static void i915_hotplug_work_func(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); - hpd_event_bits = dev_priv->hotplug.event_bits; - dev_priv->hotplug.event_bits = 0; - hpd_retry_bits = dev_priv->hotplug.retry_bits; - dev_priv->hotplug.retry_bits = 0; + hpd_event_bits = dev_priv->display.hotplug.event_bits; + dev_priv->display.hotplug.event_bits = 0; + hpd_retry_bits = dev_priv->display.hotplug.retry_bits; + dev_priv->display.hotplug.retry_bits = 0; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); @@ -435,10 +435,10 @@ static void i915_hotplug_work_func(struct work_struct *work) retry &= ~changed; if (retry) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.retry_bits |= retry; + dev_priv->display.hotplug.retry_bits |= retry; spin_unlock_irq(&dev_priv->irq_lock); - mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, + mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, msecs_to_jiffies(HPD_RETRY_DELAY)); } } @@ -502,10 +502,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); - dev_priv->hotplug.long_port_mask |= BIT(port); + dev_priv->display.hotplug.long_port_mask |= BIT(port); } else { short_hpd_pulse_mask |= BIT(pin); - dev_priv->hotplug.short_port_mask |= BIT(port); + dev_priv->display.hotplug.short_port_mask |= BIT(port); } } @@ -516,7 +516,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { + if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the @@ -529,7 +529,7 @@ void 
intel_hpd_irq_handler(struct drm_i915_private *dev_priv, continue; } - if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) + if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) continue; /* @@ -540,13 +540,13 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { long_hpd = long_hpd_pulse_mask & BIT(pin); } else { - dev_priv->hotplug.event_bits |= BIT(pin); + dev_priv->display.hotplug.event_bits |= BIT(pin); long_hpd = true; queue_hp = true; } if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { - dev_priv->hotplug.event_bits &= ~BIT(pin); + dev_priv->display.hotplug.event_bits &= ~BIT(pin); storm_detected = true; queue_hp = true; } @@ -567,9 +567,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * deadlock. */ if (queue_dig) - queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); + queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); if (queue_hp) - queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); + queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0); } /** @@ -594,8 +594,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) return; for_each_hpd_pin(i) { - dev_priv->hotplug.stats[i].count = 0; - dev_priv->hotplug.stats[i].state = HPD_ENABLED; + dev_priv->display.hotplug.stats[i].count = 0; + dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; } /* @@ -611,7 +611,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, - hotplug.poll_init_work); + display.hotplug.poll_init_work); struct drm_device *dev = &dev_priv->drm; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; @@ -619,7 +619,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) mutex_lock(&dev->mode_config.mutex); - enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); + enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { @@ -672,7 +672,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) !INTEL_DISPLAY_ENABLED(dev_priv)) return; - WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); + WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); /* * We might already be holding dev->mode_config.mutex, so do this in a @@ -680,7 +680,7 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * As well, there's no issue if we race here since we always reschedule * this worker anyway */ - schedule_work(&dev_priv->hotplug.poll_init_work); + schedule_work(&dev_priv->display.hotplug.poll_init_work); } /** @@ -707,17 +707,17 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); - schedule_work(&dev_priv->hotplug.poll_init_work); + WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); + schedule_work(&dev_priv->display.hotplug.poll_init_work); } void intel_hpd_init_work(struct drm_i915_private *dev_priv) { - INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work, + INIT_DELAYED_WORK(&dev_priv->display.hotplug.hotplug_work, i915_hotplug_work_func); - INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); - INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); - INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, + 
INIT_WORK(&dev_priv->display.hotplug.dig_port_work, i915_digport_work_func); + INIT_WORK(&dev_priv->display.hotplug.poll_init_work, i915_hpd_poll_init_work); + INIT_DELAYED_WORK(&dev_priv->display.hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); } @@ -728,17 +728,17 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.long_port_mask = 0; - dev_priv->hotplug.short_port_mask = 0; - dev_priv->hotplug.event_bits = 0; - dev_priv->hotplug.retry_bits = 0; + dev_priv->display.hotplug.long_port_mask = 0; + dev_priv->display.hotplug.short_port_mask = 0; + dev_priv->display.hotplug.event_bits = 0; + dev_priv->display.hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); - cancel_work_sync(&dev_priv->hotplug.dig_port_work); - cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work); - cancel_work_sync(&dev_priv->hotplug.poll_init_work); - cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); + cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); + cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work); + cancel_work_sync(&dev_priv->display.hotplug.poll_init_work); + cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work); } bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) @@ -749,8 +749,8 @@ bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) return false; spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { - dev_priv->hotplug.stats[pin].state = HPD_DISABLED; + if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { + dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; ret = true; } spin_unlock_irq(&dev_priv->irq_lock); @@ -764,6 +764,6 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) return; spin_lock_irq(&dev_priv->irq_lock); - dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 4970bf146c4a..dca6003ccac8 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -73,8 +73,9 @@ #include "i915_drv.h" #include "intel_de.h" #include "intel_lpe_audio.h" +#include "intel_pci_config.h" -#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) +#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->display.audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) @@ -96,13 +97,13 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); } - rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; + rsc[0].start = rsc[0].end = dev_priv->display.audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; - rsc[1].start = pci_resource_start(pdev, 0) + + rsc[1].start = pci_resource_start(pdev, GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE; - rsc[1].end = pci_resource_start(pdev, 0) + + rsc[1].end = pci_resource_start(pdev, GTTMMADR_BAR) + I915_HDMI_LPE_AUDIO_BASE + I915_HDMI_LPE_AUDIO_SIZE - 1; rsc[1].flags = IORESOURCE_MEM; rsc[1].name = "hdmi-lpe-audio-mmio"; @@ -148,7 +149,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) * than us fiddle with its internals. 
*/ - platform_device_unregister(dev_priv->audio.lpe.platdev); + platform_device_unregister(dev_priv->display.audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -167,7 +168,7 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { - int irq = dev_priv->audio.lpe.irq; + int irq = dev_priv->display.audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, @@ -204,15 +205,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; - dev_priv->audio.lpe.irq = irq_alloc_desc(0); - if (dev_priv->audio.lpe.irq < 0) { + dev_priv->display.audio.lpe.irq = irq_alloc_desc(0); + if (dev_priv->display.audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", - dev_priv->audio.lpe.irq); - ret = dev_priv->audio.lpe.irq; + dev_priv->display.audio.lpe.irq); + ret = dev_priv->display.audio.lpe.irq; goto err; } - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->display.audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); @@ -223,10 +224,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) goto err_free_irq; } - dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); + dev_priv->display.audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); - if (IS_ERR(dev_priv->audio.lpe.platdev)) { - ret = PTR_ERR(dev_priv->audio.lpe.platdev); + if (IS_ERR(dev_priv->display.audio.lpe.platdev)) { + ret = PTR_ERR(dev_priv->display.audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); @@ -241,10 +242,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) return 0; err_free_irq: - irq_free_desc(dev_priv->audio.lpe.irq); + irq_free_desc(dev_priv->display.audio.lpe.irq); err: - dev_priv->audio.lpe.irq = -1; - dev_priv->audio.lpe.platdev = NULL; + dev_priv->display.audio.lpe.irq = -1; + dev_priv->display.audio.lpe.platdev = NULL; return ret; } @@ -262,7 +263,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) if (!HAS_LPE_AUDIO(dev_priv)) return; - ret = generic_handle_irq(dev_priv->audio.lpe.irq); + ret = generic_handle_irq(dev_priv->display.audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); @@ -303,10 +304,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); - irq_free_desc(dev_priv->audio.lpe.irq); + irq_free_desc(dev_priv->display.audio.lpe.irq); - dev_priv->audio.lpe.irq = -1; - dev_priv->audio.lpe.platdev = NULL; + dev_priv->display.audio.lpe.irq = -1; + dev_priv->display.audio.lpe.platdev = NULL; } /** @@ -333,7 +334,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (!HAS_LPE_AUDIO(dev_priv)) return; - pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); + pdata = dev_get_platdata(&dev_priv->display.audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); @@ -361,7 +362,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, } if (pdata->notify_audio_lpe) - pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); + pdata->notify_audio_lpe(dev_priv->display.audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 
730480ac3300..9aa38e8141b5 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -837,12 +837,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - drm_WARN(dev, !dev_priv->vbt.int_lvds_support, + drm_WARN(dev, !dev_priv->display.vbt.int_lvds_support, "Useless DMI match. Internal LVDS support disabled by VBT\n"); return; } - if (!dev_priv->vbt.int_lvds_support) { + if (!dev_priv->display.vbt.int_lvds_support) { drm_dbg_kms(&dev_priv->drm, "Internal LVDS support disabled by VBT\n"); return; diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index f0e04d3904c6..cbfabd58b75a 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -23,6 +23,7 @@ #include "intel_modeset_setup.h" #include "intel_pch_display.h" #include "intel_pm.h" +#include "skl_watermark.h" static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) @@ -30,11 +31,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct intel_encoder *encoder; struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_bw_state *bw_state = - to_intel_bw_state(i915->bw_obj.state); + to_intel_bw_state(i915->display.bw.obj.state); struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->cdclk.obj.state); + to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); + to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; @@ -70,7 +71,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); - i915->display->crtc_disable(to_intel_atomic_state(state), crtc); + i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); @@ -415,9 +416,9 @@ static void readout_plane_state(struct drm_i915_private *i915) static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) { struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->cdclk.obj.state); + to_intel_cdclk_state(i915->display.cdclk.obj.state); struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); + to_intel_dbuf_state(i915->display.dbuf.obj.state); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; @@ -535,7 +536,7 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915) for_each_intel_crtc(&i915->drm, crtc) { struct intel_bw_state *bw_state = - to_intel_bw_state(i915->bw_obj.state); + to_intel_bw_state(i915->display.bw.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c index a91586d77cb6..0fdcf2e6d57f 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c @@ -15,8 +15,8 @@ #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_modeset_verify.h" -#include "intel_pm.h" #include "intel_snps_phy.h" +#include "skl_watermark.h" /* * Cross check the actual hw state with our own modeset state 
tracking (and its @@ -94,10 +94,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, /* * FDI already provided one idea for the dotclock. - * Yell if the encoder disagrees. + * Yell if the encoder disagrees. Allow for slight + * rounding differences. */ - drm_WARN(&dev_priv->drm, - !intel_fuzzy_clock_check(fdi_dotclock, dotclock), + drm_WARN(&dev_priv->drm, abs(fdi_dotclock - dotclock) > 1, "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", fdi_dotclock, dotclock); } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 1c0c745c142d..caa07ef34f21 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -252,7 +252,7 @@ struct opregion_asle_ext { static int check_swsci_function(struct drm_i915_private *i915, u32 function) { - struct opregion_swsci *swsci = i915->opregion.swsci; + struct opregion_swsci *swsci = i915->display.opregion.swsci; u32 main_function, sub_function; if (!swsci) @@ -265,11 +265,11 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) /* Check if we can call the function. See swsci_setup for details. */ if (main_function == SWSCI_SBCB) { - if ((i915->opregion.swsci_sbcb_sub_functions & + if ((i915->display.opregion.swsci_sbcb_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } else if (main_function == SWSCI_GBDA) { - if ((i915->opregion.swsci_gbda_sub_functions & + if ((i915->display.opregion.swsci_gbda_sub_functions & (1 << sub_function)) == 0) return -EINVAL; } @@ -280,7 +280,7 @@ static int check_swsci_function(struct drm_i915_private *i915, u32 function) static int swsci(struct drm_i915_private *dev_priv, u32 function, u32 parm, u32 *parm_out) { - struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct opregion_swsci *swsci = dev_priv->display.opregion.swsci; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 scic, dslp; u16 swsci_val; @@ -462,7 +462,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; - struct opregion_asle *asle = dev_priv->opregion.asle; + struct opregion_asle *asle = dev_priv->display.opregion.asle; struct drm_device *dev = &dev_priv->drm; drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); @@ -586,8 +586,8 @@ static void asle_work(struct work_struct *work) struct intel_opregion *opregion = container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = - container_of(opregion, struct drm_i915_private, opregion); - struct opregion_asle *asle = dev_priv->opregion.asle; + container_of(opregion, struct drm_i915_private, display.opregion); + struct opregion_asle *asle = dev_priv->display.opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -635,8 +635,8 @@ static void asle_work(struct work_struct *work) void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { - if (dev_priv->opregion.asle) - schedule_work(&dev_priv->opregion.asle_work); + if (dev_priv->display.opregion.asle) + schedule_work(&dev_priv->display.opregion.asle_work); } #define ACPI_EV_DISPLAY_SWITCH (1<<0) @@ -692,7 +692,7 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct 
drm_connector_list_iter conn_iter; int i = 0, max_outputs; @@ -731,7 +731,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0; @@ -761,7 +761,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) static void swsci_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; bool requested_callbacks = false; u32 tmp; @@ -839,7 +839,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; const struct firmware *fw = NULL; const char *name = dev_priv->params.vbt_firmware; int ret; @@ -879,7 +879,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct intel_opregion *opregion = &dev_priv->opregion; + struct intel_opregion *opregion = &dev_priv->display.opregion; struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; @@ -1106,7 +1106,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; struct drm_i915_private *i915 = to_i915(connector->dev); - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; const void *in_edid; const struct edid *edid; struct edid *new_edid; @@ -1141,7 +1141,7 @@ struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) bool intel_opregion_headless_sku(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; struct opregion_header *header = opregion->header; if (!header || header->over.major < 2 || @@ -1153,7 +1153,7 @@ bool intel_opregion_headless_sku(struct drm_i915_private *i915) void intel_opregion_register(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1169,7 +1169,7 @@ void intel_opregion_register(struct drm_i915_private *i915) void intel_opregion_resume(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1200,7 +1200,7 @@ void intel_opregion_resume(struct drm_i915_private *i915) void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; if (!opregion->header) return; @@ -1210,7 +1210,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) if (opregion->asle) opregion->asle->ardy = ASLE_ARDY_NOT_READY; - cancel_work_sync(&i915->opregion.asle_work); + cancel_work_sync(&i915->display.opregion.asle_work); if (opregion->acpi) opregion->acpi->drdy = 0; @@ -1218,7 +1218,7 @@ void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) 
void intel_opregion_unregister(struct drm_i915_private *i915) { - struct intel_opregion *opregion = &i915->opregion; + struct intel_opregion *opregion = &i915->display.opregion; intel_opregion_suspend(i915, PCI_D1); diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 79ed8bd04a07..c12bdca8da9b 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -211,9 +211,9 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, /* WA_OVERLAY_CLKGATE:alm */ if (enable) - intel_de_write(dev_priv, DSPCLK_GATE_D, 0); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), 0); else - intel_de_write(dev_priv, DSPCLK_GATE_D, + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), OVRUNIT_CLOCK_GATE_DISABLE); /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ @@ -487,7 +487,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) void intel_overlay_reset(struct drm_i915_private *dev_priv) { - struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay *overlay = dev_priv->display.overlay; if (!overlay) return; @@ -1113,7 +1113,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *new_bo; int ret; - overlay = dev_priv->overlay; + overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; @@ -1273,7 +1273,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, struct intel_overlay *overlay; int ret; - overlay = dev_priv->overlay; + overlay = dev_priv->display.overlay; if (!overlay) { drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; @@ -1416,7 +1416,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) update_polyphase_filter(overlay->regs); update_reg_attrs(overlay, overlay->regs); - dev_priv->overlay = overlay; + dev_priv->display.overlay = overlay; drm_info(&dev_priv->drm, "Initialized overlay support.\n"); return; @@ -1428,7 +1428,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv) { struct intel_overlay *overlay; - overlay = fetch_and_zero(&dev_priv->overlay); + overlay = fetch_and_zero(&dev_priv->display.overlay); if (!overlay) return; @@ -1457,7 +1457,7 @@ struct intel_overlay_error_state { struct intel_overlay_error_state * intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct intel_overlay *overlay = dev_priv->overlay; + struct intel_overlay *overlay = dev_priv->display.overlay; struct intel_overlay_error_state *error; if (!overlay || !overlay->active) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 237a40623dd7..a3a3f9fe4342 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -37,13 +37,14 @@ #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_panel.h" +#include "intel_quirks.h" bool intel_panel_use_ssc(struct drm_i915_private *i915) { if (i915->params.panel_use_ssc >= 0) return i915->params.panel_use_ssc != 0; - return i915->vbt.lvds_use_ssc - && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE); + return i915->display.vbt.lvds_use_ssc && + !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); } const struct drm_display_mode * @@ -81,15 +82,14 @@ static bool is_alt_drrs_mode(const struct drm_display_mode *mode, mode->clock != preferred_mode->clock; } -static bool is_alt_vrr_mode(const struct drm_display_mode *mode, - const struct 
drm_display_mode *preferred_mode) +static bool is_alt_fixed_mode(const struct drm_display_mode *mode, + const struct drm_display_mode *preferred_mode) { return drm_mode_match(mode, preferred_mode, DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS) && mode->hdisplay == preferred_mode->hdisplay && - mode->vdisplay == preferred_mode->vdisplay && - mode->clock != preferred_mode->clock; + mode->vdisplay == preferred_mode->vdisplay; } const struct drm_display_mode * @@ -114,6 +114,21 @@ intel_panel_downclock_mode(struct intel_connector *connector, return best_mode; } +const struct drm_display_mode * +intel_panel_highest_mode(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_mode *fixed_mode, *best_mode = adjusted_mode; + + /* pick the fixed_mode that has the highest clock */ + list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) { + if (fixed_mode->clock > best_mode->clock) + best_mode = fixed_mode; + } + + return best_mode; +} + int intel_panel_get_modes(struct intel_connector *connector) { const struct drm_display_mode *fixed_mode; @@ -172,19 +187,7 @@ int intel_panel_compute_config(struct intel_connector *connector, return 0; } -static bool is_alt_fixed_mode(const struct drm_display_mode *mode, - const struct drm_display_mode *preferred_mode, - bool has_vrr) -{ - /* is_alt_drrs_mode() is a subset of is_alt_vrr_mode() */ - if (has_vrr) - return is_alt_vrr_mode(mode, preferred_mode); - else - return is_alt_drrs_mode(mode, preferred_mode); -} - -static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector, - bool has_vrr) +static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct drm_display_mode *preferred_mode = @@ -192,7 +195,7 @@ static void intel_panel_add_edid_alt_fixed_modes(struct intel_connector *connect struct drm_display_mode *mode, *next; list_for_each_entry_safe(mode, next, &connector->base.probed_modes, head) { - if (!is_alt_fixed_mode(mode, preferred_mode, has_vrr)) + if (!is_alt_fixed_mode(mode, preferred_mode)) continue; drm_dbg_kms(&dev_priv->drm, @@ -255,7 +258,7 @@ void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, { intel_panel_add_edid_preferred_mode(connector); if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr)) - intel_panel_add_edid_alt_fixed_modes(connector, has_vrr); + intel_panel_add_edid_alt_fixed_modes(connector); intel_panel_destroy_probed_modes(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index b087c0c3cc6d..eff3ffd3d082 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -31,6 +31,9 @@ intel_panel_fixed_mode(struct intel_connector *connector, const struct drm_display_mode * intel_panel_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode); +const struct drm_display_mode * +intel_panel_highest_mode(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode); int intel_panel_get_modes(struct intel_connector *connector); enum drrs_type intel_panel_drrs_type(struct intel_connector *connector); enum drm_mode_status diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 9934c8a9e240..a66097cdc1e0 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ 
b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -167,6 +167,15 @@ static void lpt_compute_iclkip(struct iclkip_params *p, int clock) } } +int lpt_iclkip(const struct intel_crtc_state *crtc_state) +{ + struct iclkip_params p; + + lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock); + + return lpt_iclkip_freq(&p); +} + /* Program iCLKIP clock to the desired frequency */ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) { @@ -179,6 +188,7 @@ void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) lpt_disable_iclkip(dev_priv); lpt_compute_iclkip(&p, clock); + drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock); /* This should not happen with any sane values */ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) & @@ -514,7 +524,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } if (HAS_PCH_IBX(dev_priv)) { - has_ck505 = dev_priv->vbt.display_clock_mode; + has_ck505 = dev_priv->display.vbt.display_clock_mode; can_ssc = has_ck505; } else { has_ck505 = false; @@ -522,7 +532,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) { u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); if (!(temp & DPLL_VCO_ENABLE)) @@ -654,7 +664,7 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } } - BUG_ON(val != final); + drm_WARN_ON(&dev_priv->drm, val != final); } /* diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h index 12ab2c75a800..9bcf56629f24 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.h +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -14,6 +14,7 @@ struct intel_crtc_state; void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); void lpt_disable_iclkip(struct drm_i915_private *dev_priv); int lpt_get_iclkip(struct drm_i915_private *dev_priv); +int lpt_iclkip(const struct intel_crtc_state *crtc_state); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index d10f27d0b7b0..76be796df255 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -311,7 +311,7 @@ void intel_crtc_initial_plane_config(struct intel_crtc *crtc) * can even allow for smooth boot transitions if the BIOS * fb is large enough for the active pipe configuration. */ - dev_priv->display->get_initial_plane_config(crtc, &plane_config); + dev_priv->display.funcs.display->get_initial_plane_config(crtc, &plane_config); /* * If the fb is shared between multiple heads, we'll diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 1b21a341962f..21944f5bf3a8 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -12,6 +12,7 @@ #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_pps.h" +#include "intel_quirks.h" static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -28,7 +29,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp) * See intel_pps_reset_all() why we need a power domain reference here. 
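* (Presumably the PPS registers sit in the display power well, so the wakeref taken below also keeps them accessible for as long as pps.mutex is held.)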
*/ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); - mutex_lock(&dev_priv->pps_mutex); + mutex_lock(&dev_priv->display.pps.mutex); return wakeref; } @@ -38,7 +39,7 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - mutex_unlock(&dev_priv->pps_mutex); + mutex_unlock(&dev_priv->display.pps.mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return 0; @@ -163,7 +164,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); @@ -212,7 +213,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) struct intel_connector *connector = intel_dp->attached_connector; int backlight_controller = connector->panel.vbt.backlight.controller; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* We should never land here with regular DP ports */ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); @@ -282,7 +283,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum port port = dig_port->base.port; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* try to find a pipe with this port selected */ /* first pick one where the panel is on */ @@ -407,7 +408,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) @@ -420,7 +421,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_dp->pps.pps_pipe == INVALID_PIPE) @@ -463,7 +464,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_verify_state(intel_dp); @@ -556,7 +557,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 control; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && @@ -580,7 +581,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp) i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->pps.want_panel_vdd; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return false; @@ -657,7 +658,7 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, 
intel_dp->pps.want_panel_vdd); @@ -748,7 +749,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -771,7 +772,7 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -832,7 +833,7 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_ctrl_reg; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!intel_dp_is_edp(intel_dp)) return; @@ -991,7 +992,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, { struct intel_encoder *encoder; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1021,7 +1022,7 @@ void vlv_pps_init(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE); @@ -1064,7 +1065,7 @@ static void pps_vdd_init(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!edp_have_panel_vdd(intel_dp)) return; @@ -1176,7 +1177,7 @@ static void pps_init_delays_bios(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays)) intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays); @@ -1202,7 +1203,7 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp, * just fails to power back on. Increasing the delay to 800ms * seems sufficient to avoid this problem. */ - if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { + if (intel_has_quirk(dev_priv, QUIRK_INCREASE_T12_DELAY)) { vbt->t11_t12 = max_t(u16, vbt->t11_t12, 1300 * 10); drm_dbg_kms(&dev_priv->drm, "Increasing T12 panel delay as per the quirk to %d\n", @@ -1223,7 +1224,7 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ @@ -1246,7 +1247,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) struct edp_power_seq cur, vbt, spec, *final = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); /* already initialized? 
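* (presumably pps_delays_valid() treats an all-zero edp_power_seq as "not yet initialized", so the cached intel_dp->pps.pps_delays is only computed once)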
*/ if (pps_delays_valid(final)) @@ -1312,7 +1313,7 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd enum port port = dp_to_dig_port(intel_dp)->base.port; const struct edp_power_seq *seq = &intel_dp->pps.pps_delays; - lockdep_assert_held(&dev_priv->pps_mutex); + lockdep_assert_held(&dev_priv->display.pps.mutex); intel_pps_get_registers(intel_dp, ®s); @@ -1487,11 +1488,11 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) void intel_pps_setup(struct drm_i915_private *i915) { if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - i915->pps_mmio_base = PCH_PPS_BASE; + i915->display.pps.mmio_base = PCH_PPS_BASE; else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) - i915->pps_mmio_base = VLV_PPS_BASE; + i915->display.pps.mmio_base = VLV_PPS_BASE; else - i915->pps_mmio_base = PPS_BASE; + i915->display.pps.mmio_base = PPS_BASE; } void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index e6a870641cd2..9def8d9fade6 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -706,7 +706,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, if (crtc_state->enable_psr2_sel_fetch) return; - if (!(dev_priv->dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) + if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) return; if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) @@ -805,13 +805,14 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock); - /* From spec: (72 / number of lanes) * 1000 / symbol clock frequency MHz */ - req_ns = (72 / crtc_state->lane_count) * 1000 / (crtc_state->port_clock / 1000); + /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */ + req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000); if ((hblank_ns - req_ns) > 100) return true; - if (DISPLAY_VER(dev_priv) < 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) + /* Not supported <13 / Wa_22012279113:adl-p */ + if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b) return false; crtc_state->req_psr2_sdp_prior_scanline = true; @@ -1721,8 +1722,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, new_plane_state, i) { struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1, .x2 = INT_MAX }; - struct drm_atomic_helper_damage_iter iter; - struct drm_rect clip; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) continue; @@ -1767,22 +1766,18 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, continue; } - drm_rect_fp_to_int(&src, &new_plane_state->uapi.src); + src = drm_plane_state_src(&new_plane_state->uapi); + drm_rect_fp_to_int(&src, &src); - drm_atomic_helper_damage_iter_init(&iter, - &old_plane_state->uapi, - &new_plane_state->uapi); - drm_atomic_for_each_plane_damage(&iter, &clip) { - if (drm_rect_intersect(&clip, &src)) - clip_area_update(&damaged_area, &clip, - &crtc_state->pipe_src); - } - - if (damaged_area.y1 == -1) + if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi, + &new_plane_state->uapi, &damaged_area)) continue; damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; + damaged_area.x1 += 
new_plane_state->uapi.dst.x1 - src.x1; + damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1; + clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src); } @@ -1863,7 +1858,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; @@ -1871,7 +1868,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, return; for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, - crtc_state->uapi.encoder_mask) { + old_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_psr *psr = &intel_dp->psr; bool needs_to_disable = false; @@ -1884,10 +1881,10 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state, * - All planes will go inactive * - Changing between PSR versions */ - needs_to_disable |= intel_crtc_needs_modeset(crtc_state); - needs_to_disable |= !crtc_state->has_psr; - needs_to_disable |= !crtc_state->active_planes; - needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled; + needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); + needs_to_disable |= !new_crtc_state->has_psr; + needs_to_disable |= !new_crtc_state->active_planes; + needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; if (psr->enabled && needs_to_disable) intel_psr_disable_locked(intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index c8488f5ebd04..6e48d3bcdfec 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -9,12 +9,17 @@ #include "intel_display_types.h" #include "intel_quirks.h" +static void intel_set_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +{ + i915->display.quirks.mask |= BIT(quirk); +} + /* * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason */ static void quirk_ssc_force_disable(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_LVDS_SSC_DISABLE; + intel_set_quirk(i915, QUIRK_LVDS_SSC_DISABLE); drm_info(&i915->drm, "applying lvds SSC disable quirk\n"); } @@ -24,14 +29,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915) */ static void quirk_invert_brightness(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_INVERT_BRIGHTNESS; + intel_set_quirk(i915, QUIRK_INVERT_BRIGHTNESS); drm_info(&i915->drm, "applying inverted panel brightness quirk\n"); } /* Some VBT's incorrectly indicate no backlight is present */ static void quirk_backlight_present(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_BACKLIGHT_PRESENT; + intel_set_quirk(i915, QUIRK_BACKLIGHT_PRESENT); drm_info(&i915->drm, "applying backlight present quirk\n"); } @@ -40,7 +45,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915) */ static void quirk_increase_t12_delay(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_INCREASE_T12_DELAY; + intel_set_quirk(i915, QUIRK_INCREASE_T12_DELAY); drm_info(&i915->drm, "Applying T12 delay quirk\n"); } @@ -50,13 +55,13 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915) */ static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) { - i915->quirks |= 
QUIRK_INCREASE_DDI_DISABLED_TIME; + intel_set_quirk(i915, QUIRK_INCREASE_DDI_DISABLED_TIME); drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n"); } static void quirk_no_pps_backlight_power_hook(struct drm_i915_private *i915) { - i915->quirks |= QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK; + intel_set_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); drm_info(&i915->drm, "Applying no pps backlight power quirk\n"); } @@ -191,6 +196,9 @@ static struct intel_quirk intel_quirks[] = { /* ASRock ITX*/ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, + /* ECS Liva Q2 */ + { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, }; void intel_init_quirks(struct drm_i915_private *i915) @@ -213,3 +221,8 @@ void intel_init_quirks(struct drm_i915_private *i915) intel_dmi_quirks[i].hook(i915); } } + +bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk) +{ + return i915->display.quirks.mask & BIT(quirk); +} diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index b0fcff142a56..10a4d163149f 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -6,8 +6,20 @@ #ifndef __INTEL_QUIRKS_H__ #define __INTEL_QUIRKS_H__ +#include <linux/types.h> + struct drm_i915_private; -void intel_init_quirks(struct drm_i915_private *dev_priv); +enum intel_quirk_id { + QUIRK_BACKLIGHT_PRESENT, + QUIRK_INCREASE_DDI_DISABLED_TIME, + QUIRK_INCREASE_T12_DELAY, + QUIRK_INVERT_BRIGHTNESS, + QUIRK_LVDS_SSC_DISABLE, + QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, +}; + +void intel_init_quirks(struct drm_i915_private *i915); +bool intel_has_quirk(struct drm_i915_private *i915, enum intel_quirk_id quirk); #endif /* __INTEL_QUIRKS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 19122bc6d2ab..f5b744bef18f 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2016,7 +2016,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) return drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, - dev_priv->vbt.crt_ddc_pin)); + dev_priv->display.vbt.crt_ddc_pin)); } static enum drm_connector_status @@ -2581,9 +2581,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *mapping; if (sdvo->port == PORT_B) - mapping = &dev_priv->vbt.sdvo_mappings[0]; + mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else - mapping = &dev_priv->vbt.sdvo_mappings[1]; + mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); @@ -2599,9 +2599,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, u8 pin; if (sdvo->port == PORT_B) - mapping = &dev_priv->vbt.sdvo_mappings[0]; + mapping = &dev_priv->display.vbt.sdvo_mappings[0]; else - mapping = &dev_priv->vbt.sdvo_mappings[1]; + mapping = &dev_priv->display.vbt.sdvo_mappings[1]; if (mapping->initialized && intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) @@ -2639,11 +2639,11 @@ intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { - my_mapping = &dev_priv->vbt.sdvo_mappings[0]; - other_mapping = &dev_priv->vbt.sdvo_mappings[1]; + my_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; + other_mapping = 
&dev_priv->display.vbt.sdvo_mappings[1]; } else { - my_mapping = &dev_priv->vbt.sdvo_mappings[1]; - other_mapping = &dev_priv->vbt.sdvo_mappings[0]; + my_mapping = &dev_priv->display.vbt.sdvo_mappings[1]; + other_mapping = &dev_priv->display.vbt.sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. */ diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 0bdbedc67d7d..937cefd6f78f 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -518,6 +518,1086 @@ static const struct intel_mpllb_state dg2_hdmi_148_5 = { }; /* values in the below table are calculated using the algorithm */ +static const struct intel_mpllb_state dg2_hdmi_25200 = { + .clock = 25200, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 41943) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2621), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_27027 = { + .clock = 27027, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 31876) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 46555), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_28320 = { + .clock = 28320, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | +
REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 148) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40894) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 30408), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_30240 = { + .clock = 30240, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 50331) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 42466), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_31500 = { + .clock = 31500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 68) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_36000 = { + .clock = 36000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 82) | 
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_40000 = { + .clock = 40000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_49500 = { + .clock = 49500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 126) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_50000 = { + .clock = 50000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) 
| + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_57284 = { + .clock = 57284, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 150) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 42886) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 49701), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_58000 = { + .clock = 58000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_65000 = { + .clock = 65000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), 
+ .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_71000 = { + .clock = 71000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 80) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_74176 = { + .clock = 74176, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_75000 = { + .clock = 75000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 88) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_78750 = { + .clock = 78750, + .ref_control = + 
REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_85500 = { + .clock = 85500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 104) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_88750 = { + .clock = 88750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 1), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 110) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_106500 = { + .clock = 106500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 138) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_108000 = { + .clock = 108000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_115500 = { + .clock = 115500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 152) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_119000 = { + .clock = 119000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 158) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_135000 = { + .clock = 135000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 76) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 0), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_138500 = { + .clock = 138500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 78) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 26214), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_147160 = { + .clock = 147160, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 84) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 56623) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 6815), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_148352 = { + .clock = 148352, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22334) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 43829), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_154000 = { + .clock = 154000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 13) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 90) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_162000 = { + .clock = 162000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 96) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_209800 = { + .clock = 209800, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 134) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 60293) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7864), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_262750 = { + .clock = 262750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 72) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_268500 = { + .clock = 268500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 45875) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_296703 = { + .clock = 296703, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36804), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_241500 = { + .clock = 241500, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 160) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_497750 = { + .clock = 497750, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 0), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 166) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), 
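+	/*
+	 * Sanity check for this entry (a sketch, assuming the 100 MHz DG2
+	 * reference clock and the MULTIPLIER / 2 + 16 feedback-ratio
+	 * encoding used by the driver's readout math):
+	 *
+	 *   ratio = 166 / 2 + 16 + (36044 + 52427 / 65535) / 65536 ≈ 99.55
+	 *   VCO   = 100000 kHz * 99.55 = 9955000 kHz
+	 *   clock = 9955000 / (10 << TX_CLK_DIV) = 9955000 / 20 = 497750
+	 *
+	 * matching .clock above; the FRACN_QUOT/FRACN_REM values below
+	 * provide the fractional part of the ratio.
+	 */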
+ .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36044) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 52427), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_592000 = { + .clock = 592000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 13107) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13107), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + +static const struct intel_mpllb_state dg2_hdmi_593407 = { + .clock = 593407, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 0) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 22328) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 7549), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_297 = { .clock = 297000, .ref_control = @@ -584,6 +1664,42 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_148_5, &dg2_hdmi_297, &dg2_hdmi_594, + &dg2_hdmi_25200, + &dg2_hdmi_27027, + &dg2_hdmi_28320, + &dg2_hdmi_30240, + &dg2_hdmi_31500, + &dg2_hdmi_36000, + &dg2_hdmi_40000, + &dg2_hdmi_49500, + &dg2_hdmi_50000, + &dg2_hdmi_57284, + &dg2_hdmi_58000, + &dg2_hdmi_65000, + &dg2_hdmi_71000, + &dg2_hdmi_74176, + &dg2_hdmi_75000, + &dg2_hdmi_78750, + &dg2_hdmi_85500, + &dg2_hdmi_88750, + &dg2_hdmi_106500, + &dg2_hdmi_108000, + &dg2_hdmi_115500, + &dg2_hdmi_119000, + &dg2_hdmi_135000, + &dg2_hdmi_138500, + &dg2_hdmi_147160, + &dg2_hdmi_148352, + &dg2_hdmi_154000, + &dg2_hdmi_162000, + &dg2_hdmi_209800, + &dg2_hdmi_241500, + &dg2_hdmi_262750, + &dg2_hdmi_268500, + &dg2_hdmi_296703, + &dg2_hdmi_497750, + &dg2_hdmi_592000, + &dg2_hdmi_593407, NULL, }; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 6773840f6cc7..e5af955b5600 100644 --- 
a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -246,7 +246,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; - u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin]; + u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; u32 mask = 0; u32 val; @@ -279,7 +279,7 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); - u32 isr_bit = i915->hotplug.pch_hpd[dig_port->base.hpd_pin]; + u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; struct intel_uncore *uncore = &i915->uncore; u32 val, mask = 0; diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 9379f3463344..dcf89d701f0f 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -39,6 +39,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dpll.h" #include "intel_hotplug.h" #include "intel_tv.h" @@ -982,10 +983,10 @@ intel_tv_mode_vdisplay(const struct tv_mode *tv_mode) static void intel_tv_mode_to_mode(struct drm_display_mode *mode, - const struct tv_mode *tv_mode) + const struct tv_mode *tv_mode, + int clock) { - mode->clock = tv_mode->clock / - (tv_mode->oversample >> !tv_mode->progressive); + mode->clock = clock / (tv_mode->oversample >> !tv_mode->progressive); /* * tv_mode horizontal timings: @@ -1143,7 +1144,7 @@ intel_tv_get_config(struct intel_encoder *encoder, xsize = tmp >> 16; ysize = tmp & 0xffff; - intel_tv_mode_to_mode(&mode, &tv_mode); + intel_tv_mode_to_mode(&mode, &tv_mode, pipe_config->port_clock); drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&mode)); @@ -1184,6 +1185,9 @@ intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { + struct intel_atomic_state *state = + to_intel_atomic_state(pipe_config->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_tv_connector_state *tv_conn_state = to_intel_tv_connector_state(conn_state); @@ -1192,6 +1196,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; int hdisplay = adjusted_mode->crtc_hdisplay; int vdisplay = adjusted_mode->crtc_vdisplay; + int ret; if (!tv_mode) return -EINVAL; @@ -1206,7 +1211,13 @@ intel_tv_compute_config(struct intel_encoder *encoder, pipe_config->port_clock = tv_mode->clock; - intel_tv_mode_to_mode(adjusted_mode, tv_mode); + ret = intel_dpll_crtc_compute_clock(state, crtc); + if (ret) + return ret; + + pipe_config->clock_set = true; + + intel_tv_mode_to_mode(adjusted_mode, tv_mode, pipe_config->port_clock); drm_mode_set_crtcinfo(adjusted_mode, 0); if (intel_tv_source_too_wide(dev_priv, hdisplay) || @@ -1804,7 +1815,7 @@ intel_tv_get_modes(struct drm_connector *connector) * about the actual timings of the mode. We * do ignore the margins though. 
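 * (A worked example of the conversion, using illustrative numbers
 * rather than values taken from this patch: a 108000 kHz TV clock
 * with 8x oversampling on an interlaced mode gives
 * mode->clock = 108000 / (8 >> 1) = 27000. Passing the clock in
 * explicitly lets get_config() derive the mode from the actual
 * port_clock, while this path keeps the nominal tv_mode->clock.)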
*/ - intel_tv_mode_to_mode(mode, tv_mode); + intel_tv_mode_to_mode(mode, tv_mode, tv_mode->clock); if (count == 0) { drm_dbg_kms(&dev_priv->drm, "TV mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 509b0a419c20..a9f44abfc9fc 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -76,6 +76,20 @@ struct bdb_header { } __packed; /* + * BDB version number dependencies are documented as: + * + * <start>+ + * indicates the field was introduced in version <start> + * and is still valid + * + * <start>-<end> + * indicates the field was introduced in version <start> + * and obsoleted in version <end>+1. + * + * ??? indicates the specific version number is unknown + */ + +/* * There are several types of BIOS data blocks (BDBs), each block has * an ID and size in the first 3 bytes (ID in first, size in next 2). * Known types are listed below. @@ -144,12 +158,12 @@ struct bdb_general_features { /* bits 3 */ u8 disable_smooth_vision:1; u8 single_dvi:1; - u8 rotate_180:1; /* 181 */ + u8 rotate_180:1; /* 181+ */ u8 fdi_rx_polarity_inverted:1; - u8 vbios_extended_mode:1; /* 160 */ - u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */ - u8 panel_best_fit_timing:1; /* 160 */ - u8 ignore_strap_state:1; /* 160 */ + u8 vbios_extended_mode:1; /* 160+ */ + u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160+ */ + u8 panel_best_fit_timing:1; /* 160+ */ + u8 ignore_strap_state:1; /* 160+ */ /* bits 4 */ u8 legacy_monitor_detect; @@ -164,11 +178,11 @@ struct bdb_general_features { u8 rsvd11:2; /* finish byte */ /* bits 6 */ - u8 tc_hpd_retry_timeout:7; /* 242 */ + u8 tc_hpd_retry_timeout:7; /* 242+ */ u8 rsvd12:1; /* bits 7 */ - u8 afc_startup_config:2;/* 249 */ + u8 afc_startup_config:2; /* 249+ */ u8 rsvd13:6; } __packed; @@ -183,6 +197,15 @@ struct bdb_general_features { #define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ /* Device handle */ +#define DEVICE_HANDLE_CRT 0x0001 +#define DEVICE_HANDLE_EFP1 0x0004 +#define DEVICE_HANDLE_EFP2 0x0040 +#define DEVICE_HANDLE_EFP3 0x0020 +#define DEVICE_HANDLE_EFP4 0x0010 /* 194+ */ +#define DEVICE_HANDLE_EFP5 0x0002 /* 215+ */ +#define DEVICE_HANDLE_EFP6 0x0001 /* 217+ */ +#define DEVICE_HANDLE_EFP7 0x0100 /* 217+ */ +#define DEVICE_HANDLE_EFP8 0x0200 /* 217+ */ #define DEVICE_HANDLE_LFP1 0x0008 #define DEVICE_HANDLE_LFP2 0x0080 @@ -275,27 +298,27 @@ struct bdb_general_features { #define DVO_PORT_DPC 8 #define DVO_PORT_DPD 9 #define DVO_PORT_DPA 10 -#define DVO_PORT_DPE 11 /* 193 */ -#define DVO_PORT_HDMIE 12 /* 193 */ +#define DVO_PORT_DPE 11 /* 193+ */ +#define DVO_PORT_HDMIE 12 /* 193+ */ #define DVO_PORT_DPF 13 /* N/A */ #define DVO_PORT_HDMIF 14 /* N/A */ -#define DVO_PORT_DPG 15 /* 217 */ -#define DVO_PORT_HDMIG 16 /* 217 */ -#define DVO_PORT_DPH 17 /* 217 */ -#define DVO_PORT_HDMIH 18 /* 217 */ -#define DVO_PORT_DPI 19 /* 217 */ -#define DVO_PORT_HDMII 20 /* 217 */ -#define DVO_PORT_MIPIA 21 /* 171 */ -#define DVO_PORT_MIPIB 22 /* 171 */ -#define DVO_PORT_MIPIC 23 /* 171 */ -#define DVO_PORT_MIPID 24 /* 171 */ - -#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204 */ -#define HDMI_MAX_DATA_RATE_297 1 /* 204 */ -#define HDMI_MAX_DATA_RATE_165 2 /* 204 */ -#define HDMI_MAX_DATA_RATE_594 3 /* 249 */ -#define HDMI_MAX_DATA_RATE_340 4 /* 249 */ -#define HDMI_MAX_DATA_RATE_300 5 /* 249 */ +#define DVO_PORT_DPG 15 /* 217+ */ +#define DVO_PORT_HDMIG 16 /* 217+ */ +#define DVO_PORT_DPH 17 /* 217+ */ +#define 
DVO_PORT_HDMIH 18 /* 217+ */ +#define DVO_PORT_DPI 19 /* 217+ */ +#define DVO_PORT_HDMII 20 /* 217+ */ +#define DVO_PORT_MIPIA 21 /* 171+ */ +#define DVO_PORT_MIPIB 22 /* 171+ */ +#define DVO_PORT_MIPIC 23 /* 171+ */ +#define DVO_PORT_MIPID 24 /* 171+ */ + +#define HDMI_MAX_DATA_RATE_PLATFORM 0 /* 204+ */ +#define HDMI_MAX_DATA_RATE_297 1 /* 204+ */ +#define HDMI_MAX_DATA_RATE_165 2 /* 204+ */ +#define HDMI_MAX_DATA_RATE_594 3 /* 249+ */ +#define HDMI_MAX_DATA_RATE_340 4 /* 249+ */ +#define HDMI_MAX_DATA_RATE_300 5 /* 249+ */ #define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33 @@ -362,10 +385,10 @@ enum vbt_gmbus_ddi { * basically any of the fields to ensure the correct interpretation for the BDB * version in question. * - * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve - * space for the full structure below, and initialize the tail not actually - * present in VBT to zeros. Accessing those fields is fine, as long as the - * default zero is taken into account, again according to the BDB version. + * When we copy the child device configs to dev_priv->display.vbt.child_dev, we + * reserve space for the full structure below, and initialize the tail not + * actually present in VBT to zeros. Accessing those fields is fine, as long as + * the default zero is taken into account, again according to the BDB version. * * BDB versions 155 and below are considered legacy, and version 155 seems to be * a baseline for some of the VBT documentation. When adding new fields, please @@ -379,20 +402,30 @@ struct child_device_config { u8 device_id[10]; /* ascii string */ struct { u8 i2c_speed; - u8 dp_onboard_redriver; /* 158 */ - u8 dp_ondock_redriver; /* 158 */ - u8 hdmi_level_shifter_value:5; /* 169 */ - u8 hdmi_max_data_rate:3; /* 204 */ - u16 dtd_buf_ptr; /* 161 */ - u8 edidless_efp:1; /* 161 */ - u8 compression_enable:1; /* 198 */ - u8 compression_method_cps:1; /* 198 */ - u8 ganged_edp:1; /* 202 */ - u8 reserved0:4; - u8 compression_structure_index:4; /* 198 */ - u8 reserved1:4; - u8 slave_port; /* 202 */ - u8 reserved2; + u8 dp_onboard_redriver_preemph:3; /* 158+ */ + u8 dp_onboard_redriver_vswing:3; /* 158+ */ + u8 dp_onboard_redriver_present:1; /* 158+ */ + u8 reserved0:1; + u8 dp_ondock_redriver_preemph:3; /* 158+ */ + u8 dp_ondock_redriver_vswing:3; /* 158+ */ + u8 dp_ondock_redriver_present:1; /* 158+ */ + u8 reserved1:1; + u8 hdmi_level_shifter_value:5; /* 158+ */ + u8 hdmi_max_data_rate:3; /* 204+ */ + u16 dtd_buf_ptr; /* 161+ */ + u8 edidless_efp:1; /* 161+ */ + u8 compression_enable:1; /* 198+ */ + u8 compression_method_cps:1; /* 198+ */ + u8 ganged_edp:1; /* 202+ */ + u8 lttpr_non_transparent:1; /* 235+ */ + u8 disable_compression_for_ext_disp:1; /* 251+ */ + u8 reserved2:2; + u8 compression_structure_index:4; /* 198+ */ + u8 reserved3:4; + u8 hdmi_max_frl_rate:4; /* 237+ */ + u8 hdmi_max_frl_rate_valid:1; /* 237+ */ + u8 reserved4:3; /* 237+ */ + u8 reserved5; } __packed; } __packed; @@ -412,16 +445,16 @@ struct child_device_config { u8 ddc2_pin; } __packed; struct { - u8 efp_routed:1; /* 158 */ - u8 lane_reversal:1; /* 184 */ - u8 lspcon:1; /* 192 */ - u8 iboost:1; /* 196 */ - u8 hpd_invert:1; /* 196 */ - u8 use_vbt_vswing:1; /* 218 */ - u8 flag_reserved:2; - u8 hdmi_support:1; /* 158 */ - u8 dp_support:1; /* 158 */ - u8 tmds_support:1; /* 158 */ + u8 efp_routed:1; /* 158+ */ + u8 lane_reversal:1; /* 184+ */ + u8 lspcon:1; /* 192+ */ + u8 iboost:1; /* 196+ */ + u8 hpd_invert:1; /* 196+ */ + u8 use_vbt_vswing:1; /* 218+ */ + u8 dp_max_lane_count:2; /* 244+ */ + u8 
hdmi_support:1; /* 158+ */ + u8 dp_support:1; /* 158+ */ + u8 tmds_support:1; /* 158+ */ u8 support_reserved:5; u8 aux_channel; u8 dongle_detect; @@ -429,7 +462,7 @@ struct child_device_config { } __packed; u8 pipe_cap:2; - u8 sdvo_stall:1; /* 158 */ + u8 sdvo_stall:1; /* 158+ */ u8 hpd_status:2; u8 integrated_encoder:1; u8 capabilities_reserved:2; @@ -437,21 +470,21 @@ struct child_device_config { union { u8 dvo2_wiring; - u8 mipi_bridge_type; /* 171 */ + u8 mipi_bridge_type; /* 171+ */ } __packed; u16 extended_type; u8 dvo_function; - u8 dp_usb_type_c:1; /* 195 */ - u8 tbt:1; /* 209 */ - u8 flags2_reserved:2; /* 195 */ - u8 dp_port_trace_length:4; /* 209 */ - u8 dp_gpio_index; /* 195 */ - u16 dp_gpio_pin_num; /* 195 */ - u8 dp_iboost_level:4; /* 196 */ - u8 hdmi_iboost_level:4; /* 196 */ - u8 dp_max_link_rate:3; /* 216/230 GLK+ */ - u8 dp_max_link_rate_reserved:5; /* 216/230 */ + u8 dp_usb_type_c:1; /* 195+ */ + u8 tbt:1; /* 209+ */ + u8 flags2_reserved:2; /* 195+ */ + u8 dp_port_trace_length:4; /* 209+ */ + u8 dp_gpio_index; /* 195+ */ + u16 dp_gpio_pin_num; /* 195+ */ + u8 dp_iboost_level:4; /* 196+ */ + u8 hdmi_iboost_level:4; /* 196+ */ + u8 dp_max_link_rate:3; /* 216+ */ + u8 dp_max_link_rate_reserved:5; /* 216+ */ } __packed; struct bdb_general_definitions { @@ -459,7 +492,7 @@ struct bdb_general_definitions { u8 crt_ddc_gmbus_pin; /* DPMS bits */ - u8 dpms_acpi:1; + u8 dpms_non_acpi:1; u8 skip_boot_crt_detect:1; u8 dpms_aim:1; u8 rsvd1:5; /* finish byte */ @@ -488,25 +521,25 @@ struct bdb_general_definitions { struct psr_table { /* Feature bits */ - u8 full_link:1; - u8 require_aux_to_wakeup:1; + u8 full_link:1; /* 165+ */ + u8 require_aux_to_wakeup:1; /* 165+ */ u8 feature_bits_rsvd:6; /* Wait times */ - u8 idle_frames:4; - u8 lines_to_wait:3; + u8 idle_frames:4; /* 165+ */ + u8 lines_to_wait:3; /* 165+ */ u8 wait_times_rsvd:1; /* TP wake up time in multiple of 100 */ - u16 tp1_wakeup_time; - u16 tp2_tp3_wakeup_time; + u16 tp1_wakeup_time; /* 165+ */ + u16 tp2_tp3_wakeup_time; /* 165+ */ } __packed; struct bdb_psr { struct psr_table psr_table[16]; /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; + u32 psr2_tp2_tp3_wakeup_time; /* 226+ */ } __packed; /* @@ -519,9 +552,10 @@ struct bdb_psr { #define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3 struct bdb_driver_features { + /* Driver bits */ u8 boot_dev_algorithm:1; - u8 block_display_switch:1; - u8 allow_display_switch:1; + u8 allow_display_switch_dvd:1; + u8 allow_display_switch_dos:1; u8 hotplug_dvo:1; u8 dual_view_zoom:1; u8 int15h_hook:1; @@ -533,6 +567,7 @@ struct bdb_driver_features { u8 boot_mode_bpp; u8 boot_mode_refresh; + /* Extended Driver Bits 1 */ u16 enable_lfp_primary:1; u16 selective_mode_pruning:1; u16 dual_frequency:1; @@ -548,29 +583,40 @@ struct bdb_driver_features { u16 tv_hotplug:1; u16 hdmi_config:2; - u8 static_display:1; - u8 reserved2:7; + /* Driver Flags 1 */ + u8 static_display:1; /* 163+ */ + u8 embedded_platform:1; /* 163+ */ + u8 display_subsystem_enable:1; /* 163+ */ + u8 reserved0:5; + u16 legacy_crt_max_x; u16 legacy_crt_max_y; u8 legacy_crt_max_refresh; - u8 hdmi_termination; - u8 custom_vbt_version; - /* Driver features data block */ - u16 rmpm_enabled:1; - u16 s2ddt_enabled:1; - u16 dpst_enabled:1; - u16 bltclt_enabled:1; - u16 adb_enabled:1; - u16 drrs_enabled:1; - u16 grs_enabled:1; - u16 gpmt_enabled:1; - u16 tbt_enabled:1; - u16 psr_enabled:1; - u16 ips_enabled:1; - u16 reserved3:1; - u16 dmrrs_enabled:1; - u16 reserved4:2; + /* Extended Driver Bits 2 */ + u8 
hdmi_termination:1; + u8 cea861d_hdmi_support:1; + u8 self_refresh_enable:1; + u8 reserved1:5; + + u8 custom_vbt_version; /* 155+ */ + + /* Driver Feature Flags */ + u16 rmpm_enabled:1; /* 165+ */ + u16 s2ddt_enabled:1; /* 165+ */ + u16 dpst_enabled:1; /* 165-227 */ + u16 bltclt_enabled:1; /* 165+ */ + u16 adb_enabled:1; /* 165-227 */ + u16 drrs_enabled:1; /* 165-227 */ + u16 grs_enabled:1; /* 165+ */ + u16 gpmt_enabled:1; /* 165+ */ + u16 tbt_enabled:1; /* 165+ */ + u16 psr_enabled:1; /* 165-227 */ + u16 ips_enabled:1; /* 165+ */ + u16 dpfs_enabled:1; /* 165+ */ + u16 dmrrs_enabled:1; /* 174-227 */ + u16 adt_enabled:1; /* ???-228 */ + u16 hpd_wake:1; /* 201-240 */ u16 pc_feature_valid:1; } __packed; @@ -657,7 +703,7 @@ struct bdb_sdvo_panel_dtds { struct edp_fast_link_params { - u8 rate:4; + u8 rate:4; /* ???-223 */ u8 lanes:4; u8 preemphasis:4; u8 vswing:4; @@ -690,18 +736,18 @@ struct bdb_edp { u32 sdrrs_msa_timing_delay; /* ith bit indicates enabled/disabled for (i+1)th panel */ - u16 edp_s3d_feature; /* 162 */ - u16 edp_t3_optimization; /* 165 */ - u64 edp_vswing_preemph; /* 173 */ - u16 fast_link_training; /* 182 */ - u16 dpcd_600h_write_required; /* 185 */ - struct edp_pwm_delays pwm_delays[16]; /* 186 */ - u16 full_link_params_provided; /* 199 */ - struct edp_full_link_params full_link_params[16]; /* 199 */ - u16 apical_enable; /* 203 */ - struct edp_apical_params apical_params[16]; /* 203 */ - u16 edp_fast_link_training_rate[16]; /* 224 */ - u16 edp_max_port_link_rate[16]; /* 244 */ + u16 edp_s3d_feature; /* 162+ */ + u16 edp_t3_optimization; /* 165+ */ + u64 edp_vswing_preemph; /* 173+ */ + u16 fast_link_training; /* 182+ */ + u16 dpcd_600h_write_required; /* 185+ */ + struct edp_pwm_delays pwm_delays[16]; /* 186+ */ + u16 full_link_params_provided; /* 199+ */ + struct edp_full_link_params full_link_params[16]; /* 199+ */ + u16 apical_enable; /* 203+ */ + struct edp_apical_params apical_params[16]; /* 203+ */ + u16 edp_fast_link_training_rate[16]; /* 224+ */ + u16 edp_max_port_link_rate[16]; /* 244+ */ } __packed; /* @@ -710,14 +756,14 @@ struct bdb_edp { struct bdb_lvds_options { u8 panel_type; - u8 panel_type2; /* 212 */ + u8 panel_type2; /* 212+ */ /* LVDS capabilities, stored in a dword */ u8 pfit_mode:2; u8 pfit_text_mode_enhanced:1; u8 pfit_gfx_mode_enhanced:1; u8 pfit_ratio_auto:1; u8 pixel_dither:1; - u8 lvds_edid:1; + u8 lvds_edid:1; /* ???-240 */ u8 rsvd2:1; u8 rsvd4; /* LVDS Panel channel bits stored here */ @@ -731,11 +777,11 @@ struct bdb_lvds_options { /* LVDS panel type bits stored here */ u32 dps_panel_type_bits; /* LVDS backlight control type bits stored here */ - u32 blt_control_type_bits; + u32 blt_control_type_bits; /* ???-240 */ - u16 lcdvcc_s0_enable; /* 200 */ - u32 rotation; /* 228 */ - u32 position; /* 240 */ + u16 lcdvcc_s0_enable; /* 200+ */ + u32 rotation; /* 228+ */ + u32 position; /* 240+ */ } __packed; /* @@ -756,7 +802,7 @@ struct lvds_lfp_data_ptr { struct bdb_lvds_lfp_data_ptrs { u8 lvds_entries; struct lvds_lfp_data_ptr ptr[16]; - struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */ + struct lvds_lfp_data_ptr_table panel_name; /* (156-163?)+ */ } __packed; /* @@ -808,20 +854,20 @@ struct lvds_lfp_panel_name { } __packed; struct lvds_lfp_black_border { - u8 top; /* 227 */ - u8 bottom; /* 227 */ - u8 left; /* 238 */ - u8 right; /* 238 */ + u8 top; /* 227+ */ + u8 bottom; /* 227+ */ + u8 left; /* 238+ */ + u8 right; /* 238+ */ } __packed; struct bdb_lvds_lfp_data_tail { - struct lvds_lfp_panel_name panel_name[16]; /* 156-163? 
*/ - u16 scaling_enable; /* 187 */ - u8 seamless_drrs_min_refresh_rate[16]; /* 188 */ - u8 pixel_overlap_count[16]; /* 208 */ - struct lvds_lfp_black_border black_border[16]; /* 227 */ - u16 dual_lfp_port_sync_enable; /* 231 */ - u16 gpu_dithering_for_banding_artifacts; /* 245 */ + struct lvds_lfp_panel_name panel_name[16]; /* (156-163?)+ */ + u16 scaling_enable; /* 187+ */ + u8 seamless_drrs_min_refresh_rate[16]; /* 188+ */ + u8 pixel_overlap_count[16]; /* 208+ */ + struct lvds_lfp_black_border black_border[16]; /* 227+ */ + u16 dual_lfp_port_sync_enable; /* 231+ */ + u16 gpu_dithering_for_banding_artifacts; /* 245+ */ } __packed; /* @@ -836,7 +882,7 @@ struct lfp_backlight_data_entry { u8 active_low_pwm:1; u8 obsolete1:5; u16 pwm_freq_hz; - u8 min_brightness; /* Obsolete from 234+ */ + u8 min_brightness; /* ???-233 */ u8 obsolete2; u8 obsolete3; } __packed; @@ -859,7 +905,7 @@ struct lfp_brightness_level { struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; - u8 level[16]; /* Obsolete from 234+ */ + u8 level[16]; /* ???-233 */ struct lfp_backlight_control_method backlight_control[16]; struct lfp_brightness_level brightness_level[16]; /* 234+ */ struct lfp_brightness_level brightness_min_level[16]; /* 234+ */ @@ -874,8 +920,8 @@ struct lfp_power_features { u8 reserved1:1; u8 power_conservation_pref:3; u8 reserved2:1; - u8 lace_enabled_status:1; - u8 lace_support:1; + u8 lace_enabled_status:1; /* 210+ */ + u8 lace_support:1; /* 210+ */ u8 als_enable:1; } __packed; @@ -895,24 +941,24 @@ struct aggressiveness_profile2_entry { } __packed; struct bdb_lfp_power { - struct lfp_power_features features; + struct lfp_power_features features; /* ???-227 */ struct als_data_entry als[5]; - u8 lace_aggressiveness_profile:3; + u8 lace_aggressiveness_profile:3; /* 210-227 */ u8 reserved1:5; - u16 dpst; - u16 psr; - u16 drrs; - u16 lace_support; - u16 adt; - u16 dmrrs; - u16 adb; - u16 lace_enabled_status; - struct aggressiveness_profile_entry aggressiveness[16]; - u16 hobl; /* 232+ */ - u16 vrr_feature_enabled; /* 233+ */ - u16 elp; /* 247+ */ - u16 opst; /* 247+ */ - struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */ + u16 dpst; /* 228+ */ + u16 psr; /* 228+ */ + u16 drrs; /* 228+ */ + u16 lace_support; /* 228+ */ + u16 adt; /* 228+ */ + u16 dmrrs; /* 228+ */ + u16 adb; /* 228+ */ + u16 lace_enabled_status; /* 228+ */ + struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */ + u16 hobl; /* 232+ */ + u16 vrr_feature_enabled; /* 233+ */ + u16 elp; /* 247+ */ + u16 opst; /* 247+ */ + struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */ } __packed; /* @@ -922,10 +968,10 @@ struct bdb_lfp_power { #define MAX_MIPI_CONFIGURATIONS 6 struct bdb_mipi_config { - struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175 */ - struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177 */ - struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186 */ - u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190 */ + struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; /* 175+ */ + struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; /* 177+ */ + struct edp_pwm_delays pwm_delays[MAX_MIPI_CONFIGURATIONS]; /* 186+ */ + u8 pmic_i2c_bus_number[MAX_MIPI_CONFIGURATIONS]; /* 190+ */ } __packed; /* diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 43e1bbc1e303..269f9792390d 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -344,7 +344,7 
@@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (!INTEL_INFO(i915)->display.has_dsc) + if (!RUNTIME_INFO(i915)->has_dsc) return false; if (DISPLAY_VER(i915) >= 12) @@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) u8 i = 0; vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; - vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, pipe_config->dsc.slice_count); @@ -597,6 +596,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_VER_MIN_SHIFT | vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; + if (vdsc_cfg->dsc_version_minor == 2) + pps_val |= DSC_ALT_ICH_SEL; if (vdsc_cfg->block_pred_enable) pps_val |= DSC_BLOCK_PREDICTION; if (vdsc_cfg->convert_rgb) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 04250a0fec3c..5eac99021875 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -142,11 +142,16 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, * For XE_LPD+, we use guardband and pipeline override * is deprecated. */ - if (DISPLAY_VER(i915) >= 13) + if (DISPLAY_VER(i915) >= 13) { + /* + * FIXME: Subtract Window2 delay from below value. + * + * Window2 specifies time required to program DSB (Window2) in + * number of scan lines. Assuming 0 for no DSB. + */ crtc_state->vrr.guardband = - crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - - i915->window2_delay; - else + crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay; + } else { /* * FIXME: s/4/framestart_delay/ to get consistent * earliest/latest points for register latching regardless @@ -159,6 +164,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.pipeline_full = min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1); + } crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 4d6a27757065..7cb713043408 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -14,11 +14,11 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fbc.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "skl_watermark.h" #include "pxp/intel_pxp.h" static const u32 skl_plane_formats[] = { @@ -1928,7 +1928,7 @@ static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id fbc_id, enum plane_id plane_id) { - if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0) + if ((RUNTIME_INFO(dev_priv)->fbc_mask & BIT(fbc_id)) == 0) return false; return plane_id == PLANE_PRIMARY; @@ -1940,7 +1940,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe); if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id)) - return dev_priv->fbc[fbc_id]; + return dev_priv->display.fbc[fbc_id]; else return NULL; } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c 
new file mode 100644 index 000000000000..01b0932757ed --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -0,0 +1,3562 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/drm_blend.h> + +#include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_bw.h" +#include "intel_de.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_display_types.h" +#include "intel_fb.h" +#include "skl_watermark.h" + +#include "i915_drv.h" +#include "i915_fixed.h" +#include "i915_reg.h" +#include "intel_pcode.h" +#include "intel_pm.h" + +static void skl_sagv_disable(struct drm_i915_private *i915); + +/* Stores plane specific WM parameters */ +struct skl_wm_params { + bool x_tiled, y_tiled; + bool rc_surface; + bool is_planar; + u32 width; + u8 cpp; + u32 plane_pixel_rate; + u32 y_min_scanlines; + u32 plane_bytes_per_line; + uint_fixed_16_16_t plane_blocks_per_line; + uint_fixed_16_16_t y_tile_minimum; + u32 linetime_us; + u32 dbuf_block_size; +}; + +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915) +{ + u8 enabled_slices = 0; + enum dbuf_slice slice; + + for_each_dbuf_slice(i915, slice) { + if (intel_uncore_read(&i915->uncore, + DBUF_CTL_S(slice)) & DBUF_POWER_STATE) + enabled_slices |= BIT(slice); + } + + return enabled_slices; +} + +/* + * FIXME: We still don't have the proper code detect if we need to apply the WA, + * so assume we'll always need it in order to avoid underruns. + */ +static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) == 9; +} + +static bool +intel_has_sagv(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) && + i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; +} + +static u32 +intel_sagv_block_time(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 14) { + u32 val; + + val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV); + + return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val); + } else if (DISPLAY_VER(i915) >= 12) { + u32 val = 0; + int ret; + + ret = snb_pcode_read(&i915->uncore, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); + if (ret) { + drm_dbg_kms(&i915->drm, "Couldn't read SAGV block time!\n"); + return 0; + } + + return val; + } else if (DISPLAY_VER(i915) == 11) { + return 10; + } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) { + return 30; + } else { + return 0; + } +} + +static void intel_sagv_init(struct drm_i915_private *i915) +{ + if (!intel_has_sagv(i915)) + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + + /* + * Probe to see if we have working SAGV control. + * For icl+ this was already determined by intel_bw_init_hw(). + */ + if (DISPLAY_VER(i915) < 11) + skl_sagv_disable(i915); + + drm_WARN_ON(&i915->drm, i915->display.sagv.status == I915_SAGV_UNKNOWN); + + i915->display.sagv.block_time_us = intel_sagv_block_time(i915); + + drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n", + str_yes_no(intel_has_sagv(i915)), i915->display.sagv.block_time_us); + + /* avoid overflow when adding with wm0 latency/etc. */ + if (drm_WARN(&i915->drm, i915->display.sagv.block_time_us > U16_MAX, + "Excessive SAGV block time %u, ignoring\n", + i915->display.sagv.block_time_us)) + i915->display.sagv.block_time_us = 0; + + if (!intel_has_sagv(i915)) + i915->display.sagv.block_time_us = 0; +} + +/* + * SAGV dynamically adjusts the system agent voltage and clock frequencies + * depending on power and performance requirements. 
The display engine access + * to system memory is blocked during the adjustment time. Because of the + * blocking time, having this enabled can cause full system hangs and/or pipe + * underruns if we don't meet all of the following requirements: + * + * - <= 1 pipe enabled + * - All planes can enable watermarks for latencies >= SAGV engine block time + * - We're not using an interlaced display configuration + */ +static void skl_sagv_enable(struct drm_i915_private *i915) +{ + int ret; + + if (!intel_has_sagv(i915)) + return; + + if (i915->display.sagv.status == I915_SAGV_ENABLED) + return; + + drm_dbg_kms(&i915->drm, "Enabling SAGV\n"); + ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_ENABLE); + + /* We don't need to wait for SAGV when enabling */ + + /* + * Some skl systems, pre-release machines in particular, + * don't actually have SAGV. + */ + if (IS_SKYLAKE(i915) && ret == -ENXIO) { + drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + return; + } else if (ret < 0) { + drm_err(&i915->drm, "Failed to enable SAGV\n"); + return; + } + + i915->display.sagv.status = I915_SAGV_ENABLED; +} + +static void skl_sagv_disable(struct drm_i915_private *i915) +{ + int ret; + + if (!intel_has_sagv(i915)) + return; + + if (i915->display.sagv.status == I915_SAGV_DISABLED) + return; + + drm_dbg_kms(&i915->drm, "Disabling SAGV\n"); + /* bspec says to keep retrying for at least 1 ms */ + ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_DISABLE, + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, + 1); + /* + * Some skl systems, pre-release machines in particular, + * don't actually have SAGV. + */ + if (IS_SKYLAKE(i915) && ret == -ENXIO) { + drm_dbg(&i915->drm, "No SAGV found on system, ignoring\n"); + i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + return; + } else if (ret < 0) { + drm_err(&i915->drm, "Failed to disable SAGV (%d)\n", ret); + return; + } + + i915->display.sagv.status = I915_SAGV_DISABLED; +} + +static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + + if (!new_bw_state) + return; + + if (!intel_can_enable_sagv(i915, new_bw_state)) + skl_sagv_disable(i915); +} + +static void skl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + + if (!new_bw_state) + return; + + if (intel_can_enable_sagv(i915, new_bw_state)) + skl_sagv_enable(i915); +} + +static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask; + new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&i915->drm, "Restricting QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Restrict required qgv points before updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. 
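+	 * For example (illustrative masks, not values from this patch):
+	 * with old_bw_state->qgv_points_mask = 0x3 and the new mask = 0x5,
+	 * the pre-update hook restricts to 0x3 | 0x5 = 0x7 so that both the
+	 * old and the new configuration remain safe while the update is in
+	 * flight; the post-update hook then relaxes the mask to just 0x5.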
Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(i915, new_mask); +} + +static void icl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + new_mask = new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(&i915->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Allow required qgv points after updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(i915, new_mask); +} + +void intel_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + /* + * Just return if we can't control SAGV or don't have it. + * This is different from situation when we have SAGV but just can't + * afford it due to DBuf limitation - in case if SAGV is completely + * disabled in a BIOS, we are not even allowed to send a PCode request, + * as it will throw an error. So have to check it here. + */ + if (!intel_has_sagv(i915)) + return; + + if (DISPLAY_VER(i915) >= 11) + icl_sagv_pre_plane_update(state); + else + skl_sagv_pre_plane_update(state); +} + +void intel_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + /* + * Just return if we can't control SAGV or don't have it. + * This is different from situation when we have SAGV but just can't + * afford it due to DBuf limitation - in case if SAGV is completely + * disabled in a BIOS, we are not even allowed to send a PCode request, + * as it will throw an error. So have to check it here. + */ + if (!intel_has_sagv(i915)) + return; + + if (DISPLAY_VER(i915) >= 11) + icl_sagv_post_plane_update(state); + else + skl_sagv_post_plane_update(state); +} + +static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum plane_id plane_id; + int max_level = INT_MAX; + + if (!intel_has_sagv(i915)) + return false; + + if (!crtc_state->hw.active) + return true; + + if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) + return false; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + int level; + + /* Skip this plane if it's not enabled */ + if (!wm->wm[0].enable) + continue; + + /* Find the highest enabled wm level for this plane */ + for (level = ilk_wm_max_level(i915); + !wm->wm[level].enable; --level) + { } + + /* Highest common enabled wm level for all planes */ + max_level = min(level, max_level); + } + + /* No enabled planes? 
*/ + if (max_level == INT_MAX) + return true; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + /* + * All enabled planes must have enabled a common wm level that + * can tolerate memory latencies higher than sagv_block_time_us + */ + if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) + return false; + } + + return true; +} + +static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum plane_id plane_id; + + if (!crtc_state->hw.active) + return true; + + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (wm->wm[0].enable && !wm->sagv.wm0.enable) + return false; + } + + return true; +} + +static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (DISPLAY_VER(i915) >= 12) + return tgl_crtc_can_enable_sagv(crtc_state); + else + return skl_crtc_can_enable_sagv(crtc_state); +} + +bool intel_can_enable_sagv(struct drm_i915_private *i915, + const struct intel_bw_state *bw_state) +{ + if (DISPLAY_VER(i915) < 11 && + bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) + return false; + + return bw_state->pipe_sagv_reject == 0; +} + +static int intel_compute_sagv_mask(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + int ret; + struct intel_crtc *crtc; + struct intel_crtc_state *new_crtc_state; + struct intel_bw_state *new_bw_state = NULL; + const struct intel_bw_state *old_bw_state = NULL; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + new_bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(new_bw_state)) + return PTR_ERR(new_bw_state); + + old_bw_state = intel_atomic_get_old_bw_state(state); + + if (intel_crtc_can_enable_sagv(new_crtc_state)) + new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); + else + new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); + } + + if (!new_bw_state) + return 0; + + new_bw_state->active_pipes = + intel_calc_active_pipes(state, old_bw_state->active_pipes); + + if (new_bw_state->active_pipes != old_bw_state->active_pipes) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + if (intel_can_enable_sagv(i915, new_bw_state) != + intel_can_enable_sagv(i915, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + + for_each_new_intel_crtc_in_state(state, crtc, + new_crtc_state, i) { + struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; + + /* + * We store use_sagv_wm in the crtc state rather than relying on + * that bw state since we have no convenient way to get at the + * latter from the plane commit hooks (especially in the legacy + * cursor case) + */ + pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) && + DISPLAY_VER(i915) >= 12 && + intel_can_enable_sagv(i915, new_bw_state); + } + + return 0; +} + +static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, + u16 start, u16 end) +{ + entry->start = start; + entry->end = end; + + return end; +} + +static int 
intel_dbuf_slice_size(struct drm_i915_private *i915)
+{
+	return INTEL_INFO(i915)->display.dbuf.size /
+		hweight8(INTEL_INFO(i915)->display.dbuf.slice_mask);
+}
+
+static void
+skl_ddb_entry_for_slices(struct drm_i915_private *i915, u8 slice_mask,
+			 struct skl_ddb_entry *ddb)
+{
+	int slice_size = intel_dbuf_slice_size(i915);
+
+	if (!slice_mask) {
+		ddb->start = 0;
+		ddb->end = 0;
+		return;
+	}
+
+	ddb->start = (ffs(slice_mask) - 1) * slice_size;
+	ddb->end = fls(slice_mask) * slice_size;
+
+	WARN_ON(ddb->start >= ddb->end);
+	WARN_ON(ddb->end > INTEL_INFO(i915)->display.dbuf.size);
+}
+
+static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask)
+{
+	struct skl_ddb_entry ddb;
+
+	if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2)))
+		slice_mask = BIT(DBUF_S1);
+	else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4)))
+		slice_mask = BIT(DBUF_S3);
+
+	skl_ddb_entry_for_slices(i915, slice_mask, &ddb);
+
+	return ddb.start;
+}
+
+u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915,
+			    const struct skl_ddb_entry *entry)
+{
+	int slice_size = intel_dbuf_slice_size(i915);
+	enum dbuf_slice start_slice, end_slice;
+	u8 slice_mask = 0;
+
+	if (!skl_ddb_entry_size(entry))
+		return 0;
+
+	start_slice = entry->start / slice_size;
+	end_slice = (entry->end - 1) / slice_size;
+
+	/*
+	 * A per-plane DDB entry can, in the worst case, span multiple
+	 * slices, but a single entry is always contiguous.
+	 */
+	while (start_slice <= end_slice) {
+		slice_mask |= BIT(start_slice);
+		start_slice++;
+	}
+
+	return slice_mask;
+}
+
+static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state)
+{
+	const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+	int hdisplay, vdisplay;
+
+	if (!crtc_state->hw.active)
+		return 0;
+
+	/*
+	 * The watermark/DDB requirement depends heavily on the width of the
+	 * framebuffer, so instead of allocating DDB equally among pipes,
+	 * distribute DDB based on the resolution/width of the display.
+	 */
+	drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay);
+
+	return hdisplay;
+}
+
+static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state,
+				    enum pipe for_pipe,
+				    unsigned int *weight_start,
+				    unsigned int *weight_end,
+				    unsigned int *weight_total)
+{
+	struct drm_i915_private *i915 =
+		to_i915(dbuf_state->base.state->base.dev);
+	enum pipe pipe;
+
+	*weight_start = 0;
+	*weight_end = 0;
+	*weight_total = 0;
+
+	for_each_pipe(i915, pipe) {
+		int weight = dbuf_state->weight[pipe];
+
+		/*
+		 * Do not account for pipes using other slice sets. Luckily,
+		 * as of the current BSpec, slice sets do not partially
+		 * intersect (pipes share either the same single slice or the
+		 * same slice set, i.e. no partial intersection), so checking
+		 * for equality is enough for now.
+ */ + if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) + continue; + + *weight_total += weight; + if (pipe < for_pipe) { + *weight_start += weight; + *weight_end += weight; + } else if (pipe == for_pipe) { + *weight_end += weight; + } + } +} + +static int +skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + unsigned int weight_total, weight_start, weight_end; + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + struct intel_crtc_state *crtc_state; + struct skl_ddb_entry ddb_slices; + enum pipe pipe = crtc->pipe; + unsigned int mbus_offset = 0; + u32 ddb_range_size; + u32 dbuf_slice_mask; + u32 start, end; + int ret; + + if (new_dbuf_state->weight[pipe] == 0) { + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); + goto out; + } + + dbuf_slice_mask = new_dbuf_state->slices[pipe]; + + skl_ddb_entry_for_slices(i915, dbuf_slice_mask, &ddb_slices); + mbus_offset = mbus_ddb_offset(i915, dbuf_slice_mask); + ddb_range_size = skl_ddb_entry_size(&ddb_slices); + + intel_crtc_dbuf_weights(new_dbuf_state, pipe, + &weight_start, &weight_end, &weight_total); + + start = ddb_range_size * weight_start / weight_total; + end = ddb_range_size * weight_end / weight_total; + + skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], + ddb_slices.start - mbus_offset + start, + ddb_slices.start - mbus_offset + end); + +out: + if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && + skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], + &new_dbuf_state->ddb[pipe])) + return 0; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + /* + * Used for checking overlaps, so we need absolute + * offsets instead of MBUS relative offsets. 
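+	 *
+	 * Worked example of the allocation above (illustrative numbers):
+	 * two pipes share one slice set with ddb_range_size = 1024 and
+	 * hdisplay-derived weights A = 1920, B = 3840. For pipe B,
+	 * weight_start = 1920, weight_end = 5760, weight_total = 5760, so
+	 *
+	 *   start = 1024 * 1920 / 5760 = 341
+	 *   end   = 1024 * 5760 / 5760 = 1024
+	 *
+	 * i.e. pipe A gets blocks [0, 341) and pipe B gets [341, 1024),
+	 * proportional to framebuffer width. skl_ddb_dbuf_slice_mask()
+	 * performs the reverse mapping: with a 1024-block slice size, an
+	 * entry {896, 1152} spans start_slice 0 and end_slice 1, mask 0x3.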
+ */ + crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; + crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", + crtc->base.base.id, crtc->base.name, + old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], + old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, + new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, + old_dbuf_state->active_pipes, new_dbuf_state->active_pipes); + + return 0; +} + +static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane); + +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, + int level, + unsigned int latency, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */); + +static unsigned int +skl_cursor_allocation(const struct intel_crtc_state *crtc_state, + int num_active) +{ + struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + int level, max_level = ilk_wm_max_level(i915); + struct skl_wm_level wm = {}; + int ret, min_ddb_alloc = 0; + struct skl_wm_params wp; + + ret = skl_compute_wm_params(crtc_state, 256, + drm_format_info(DRM_FORMAT_ARGB8888), + DRM_FORMAT_MOD_LINEAR, + DRM_MODE_ROTATE_0, + crtc_state->pixel_rate, &wp, 0); + drm_WARN_ON(&i915->drm, ret); + + for (level = 0; level <= max_level; level++) { + unsigned int latency = i915->display.wm.skl_latency[level]; + + skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); + if (wm.min_ddb_alloc == U16_MAX) + break; + + min_ddb_alloc = wm.min_ddb_alloc; + } + + return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); +} + +static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) +{ + skl_ddb_entry_init(entry, + REG_FIELD_GET(PLANE_BUF_START_MASK, reg), + REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); + if (entry->end) + entry->end++; +} + +static void +skl_ddb_get_hw_plane_state(struct drm_i915_private *i915, + const enum pipe pipe, + const enum plane_id plane_id, + struct skl_ddb_entry *ddb, + struct skl_ddb_entry *ddb_y) +{ + u32 val; + + /* Cursor doesn't support NV12/planar, so no extra calculation needed */ + if (plane_id == PLANE_CURSOR) { + val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe)); + skl_ddb_entry_init_from_hw(ddb, val); + return; + } + + val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id)); + skl_ddb_entry_init_from_hw(ddb, val); + + if (DISPLAY_VER(i915) >= 11) + return; + + val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); + skl_ddb_entry_init_from_hw(ddb_y, val); +} + +static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, + struct skl_ddb_entry *ddb, + struct skl_ddb_entry *ddb_y) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + enum plane_id plane_id; + + power_domain = POWER_DOMAIN_PIPE(pipe); + wakeref = intel_display_power_get_if_enabled(i915, power_domain); + if (!wakeref) + return; + + for_each_plane_id_on_crtc(crtc, plane_id) + skl_ddb_get_hw_plane_state(i915, pipe, + plane_id, + &ddb[plane_id], + &ddb_y[plane_id]); + + intel_display_power_put(i915, power_domain, wakeref); +} + +struct dbuf_slice_conf_entry { + u8 active_pipes; + u8 dbuf_mask[I915_MAX_PIPES]; + bool join_mbus; +}; + +/* + * Table taken from Bspec 12716 + * Pipes do have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * For more DBuf slices algorithm can get even more messy + * and less readable, so decided to use a table almost + * as is from BSpec itself - that way it is at least easier + * to compare, change and check. + */ +static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + {} +}; + +/* + * Table taken from Bspec 49255 + * Pipes do have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * For more DBuf slices algorithm can get even more messy + * and less readable, so decided to use a table almost + * as is from BSpec itself - that way it is at least easier + * to compare, change and check. 
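+ * (Sketch of how such a table is meant to be consumed, as an
+ * illustration rather than code from this patch:
+ *
+ *	for (i = 0; table[i].active_pipes != 0; i++)
+ *		if (table[i].active_pipes == active_pipes)
+ *			return table[i].dbuf_mask[pipe];
+ *	return 0;
+ *
+ * i.e. scan until the active-pipes bitmask matches exactly and return
+ * that pipe's dbuf mask; this is why the adlp table below keeps its
+ * join_mbus entries first, so a linear scan prefers them.)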
+ */ +static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + {} +}; + +static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | 
BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3), + [PIPE_D] = BIT(DBUF_S4), + }, + }, + {} +}; + +static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { + /* + * Keep the join_mbus cases first so check_mbus_joined() + * will prefer them over the !join_mbus cases. + */ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = true, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = true, + }, + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + .join_mbus = false, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), + 
[PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+        },
+    },
+    {
+        .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+        .dbuf_mask = {
+            [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+            [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+            [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+        },
+    },
+    {
+        .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+        .dbuf_mask = {
+            [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+            [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+            [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+            [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2),
+        },
+    },
+    {}
+
+};
+
+static bool check_mbus_joined(u8 active_pipes,
+                  const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+    int i;
+
+    for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+        if (dbuf_slices[i].active_pipes == active_pipes)
+            return dbuf_slices[i].join_mbus;
+    }
+    return false;
+}
+
+static bool adlp_check_mbus_joined(u8 active_pipes)
+{
+    return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
+}
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
+                  const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+    int i;
+
+    for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+        if (dbuf_slices[i].active_pipes == active_pipes &&
+            dbuf_slices[i].join_mbus == join_mbus)
+            return dbuf_slices[i].dbuf_mask[pipe];
+    }
+    return 0;
+}
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in BSpec for
+ * the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+    /*
+     * FIXME: For ICL this is still a bit unclear, as a previous BSpec
+     * revision required calculating a "pipe ratio" in order to determine
+     * whether one or two slices can be used for single pipe
+     * configurations, as an additional constraint to the existing table.
+     * However, based on recent info, it should not be a "pipe ratio"
+     * but rather a ratio between pixel_rate and cdclk with additional
+     * constants, so for now we only use the table until this is
+     * clarified. This is also the reason why the crtc_state param is
+     * still here - we will need it once those additional constraints
+     * pop up.
+     */
+    return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                   icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+    return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                   tgl_allowed_dbufs);
+}
+
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+    return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                   adlp_allowed_dbufs);
+}
+
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
+{
+    return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+                   dg2_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
+{
+    struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+    enum pipe pipe = crtc->pipe;
+
+    if (IS_DG2(i915))
+        return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+    else if (DISPLAY_VER(i915) >= 13)
+        return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+    else if (DISPLAY_VER(i915) == 12)
+        return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+    else if (DISPLAY_VER(i915) == 11)
+        return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
+    /*
+     * For anything else just return one slice for now.
+     * Should be extended for other platforms.
+     */
+    return active_pipes & BIT(pipe) ?
BIT(DBUF_S1) : 0; +} + +static bool +use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + +static u64 +skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum plane_id plane_id; + u64 data_rate = 0; + + for_each_plane_id_on_crtc(crtc, plane_id) { + if (plane_id == PLANE_CURSOR) + continue; + + data_rate += crtc_state->rel_data_rate[plane_id]; + + if (DISPLAY_VER(i915) < 11) + data_rate += crtc_state->rel_data_rate_y[plane_id]; + } + + return data_rate; +} + +static const struct skl_wm_level * +skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id, + int level) +{ + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + if (level == 0 && pipe_wm->use_sagv_wm) + return &wm->sagv.wm0; + + return &wm->wm[level]; +} + +static const struct skl_wm_level * +skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, + enum plane_id plane_id) +{ + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + if (pipe_wm->use_sagv_wm) + return &wm->sagv.trans_wm; + + return &wm->trans_wm; +} + +/* + * We only disable the watermarks for each plane if + * they exceed the ddb allocation of said plane. This + * is done so that we don't end up touching cursor + * watermarks needlessly when some other plane reduces + * our max possible watermark level. + * + * Bspec has this to say about the PLANE_WM enable bit: + * "All the watermarks at this level for all enabled + * planes must be enabled before the level will be used." + * So this is actually safe to do. + */ +static void +skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb) +{ + if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) + memset(wm, 0, sizeof(*wm)); +} + +static void +skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, + const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb) +{ + if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) || + uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) { + memset(wm, 0, sizeof(*wm)); + memset(uv_wm, 0, sizeof(*uv_wm)); + } +} + +static bool icl_need_wm1_wa(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + /* + * Wa_1408961008:icl, ehl + * Wa_14012656716:tgl, adl + * Underruns with WM1+ disabled + */ + return DISPLAY_VER(i915) == 11 || + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); +} + +struct skl_plane_ddb_iter { + u64 data_rate; + u16 start, size; +}; + +static void +skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, + struct skl_ddb_entry *ddb, + const struct skl_wm_level *wm, + u64 data_rate) +{ + u16 size, extra = 0; + + if (data_rate) { + extra = min_t(u16, iter->size, + DIV64_U64_ROUND_UP(iter->size * data_rate, + iter->data_rate)); + iter->size -= extra; + iter->data_rate -= data_rate; + } + + /* + * Keep ddb entry of all disabled planes explicitly zeroed + * to avoid skl_ddb_add_affected_planes() adding them to + * the state when other planes change their allocations. 
+ */ + size = wm->min_ddb_alloc + extra; + if (size) + iter->start = skl_ddb_entry_init(ddb, iter->start, + iter->start + size); +} + +static int +skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_dbuf_state *dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; + int num_active = hweight8(dbuf_state->active_pipes); + struct skl_plane_ddb_iter iter; + enum plane_id plane_id; + u16 cursor_size; + u32 blocks; + int level; + + /* Clear the partitioning for disabled planes. */ + memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb)); + memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); + + if (!crtc_state->hw.active) + return 0; + + iter.start = alloc->start; + iter.size = skl_ddb_entry_size(alloc); + if (iter.size == 0) + return 0; + + /* Allocate fixed number of blocks for cursor. */ + cursor_size = skl_cursor_allocation(crtc_state, num_active); + iter.size -= cursor_size; + skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], + alloc->end - cursor_size, alloc->end); + + iter.data_rate = skl_total_relative_data_rate(crtc_state); + + /* + * Find the highest watermark level for which we can satisfy the block + * requirement of active planes. + */ + for (level = ilk_wm_max_level(i915); level >= 0; level--) { + blocks = 0; + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + + if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { + drm_WARN_ON(&i915->drm, + wm->wm[level].min_ddb_alloc != U16_MAX); + blocks = U32_MAX; + break; + } + continue; + } + + blocks += wm->wm[level].min_ddb_alloc; + blocks += wm->uv_wm[level].min_ddb_alloc; + } + + if (blocks <= iter.size) { + iter.size -= blocks; + break; + } + } + + if (level < 0) { + drm_dbg_kms(&i915->drm, + "Requested display configuration exceeds system DDB limitations"); + drm_dbg_kms(&i915->drm, "minimum required %d/%d\n", + blocks, iter.size); + return -EINVAL; + } + + /* avoid the WARN later when we don't allocate any extra DDB */ + if (iter.data_rate == 0) + iter.size = 0; + + /* + * Grant each plane the blocks it requires at the highest achievable + * watermark level, plus an extra share of the leftover blocks + * proportional to its relative data rate. 
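Reduced to its arithmetic, the allocation above proceeds plane by plane: take min_ddb_alloc blocks for the chosen level, then carve off a share of the remaining blocks proportional to the plane's relative data rate, updating the running totals exactly as skl_allocate_plane_ddb() does. A standalone sketch with hypothetical block counts and data rates:

#include <stdio.h>
#include <stdint.h>

struct ddb_iter {
    uint64_t data_rate;   /* sum of the remaining planes' data rates */
    uint16_t start, size; /* unallocated block range */
};

/* Carve min + proportional extra blocks for one plane, as in the driver. */
static void allocate_plane(struct ddb_iter *it, uint16_t min_blocks,
               uint64_t rate, uint16_t *out_start, uint16_t *out_end)
{
    uint16_t extra = 0;

    if (rate) {
        /* ceil(size * rate / data_rate), clamped to what is left */
        uint64_t e = ((uint64_t)it->size * rate + it->data_rate - 1) /
                 it->data_rate;
        extra = e < it->size ? (uint16_t)e : it->size;
        it->size -= extra;
        it->data_rate -= rate;
    }

    *out_start = it->start;
    *out_end = it->start + min_blocks + extra;
    it->start = *out_end;
}

int main(void)
{
    /* 512 blocks left after minimums; two planes at a 3:1 rate ratio */
    struct ddb_iter it = { .data_rate = 400, .start = 0, .size = 512 };
    uint16_t s, e;

    allocate_plane(&it, 32, 300, &s, &e);
    printf("plane 0: %u-%u\n", s, e);
    allocate_plane(&it, 32, 100, &s, &e);
    printf("plane 1: %u-%u\n", s, e);
    return 0;
}

Because each share is computed against the still-unallocated remainder, the last plane absorbs any rounding slack and the pool is consumed exactly, which is what the drm_WARN_ON on iter.size below relies on.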
+ */ + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + const struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) + continue; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) { + skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], + crtc_state->rel_data_rate_y[plane_id]); + skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level], + crtc_state->rel_data_rate[plane_id]); + } else { + skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level], + crtc_state->rel_data_rate[plane_id]); + } + } + drm_WARN_ON(&i915->drm, iter.size != 0 || iter.data_rate != 0); + + /* + * When we calculated watermark values we didn't know how high + * of a level we'd actually be able to hit, so we just marked + * all levels as "enabled." Go back now and disable the ones + * that aren't actually possible. + */ + for (level++; level <= ilk_wm_max_level(i915); level++) { + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) + skl_check_nv12_wm_level(&wm->wm[level], + &wm->uv_wm[level], + ddb_y, ddb); + else + skl_check_wm_level(&wm->wm[level], ddb); + + if (icl_need_wm1_wa(i915, plane_id) && + level == 1 && wm->wm[0].enable) { + wm->wm[level].blocks = wm->wm[0].blocks; + wm->wm[level].lines = wm->wm[0].lines; + wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; + } + } + } + + /* + * Go back and disable the transition and SAGV watermarks + * if it turns out we don't have enough DDB blocks for them. + */ + for_each_plane_id_on_crtc(crtc, plane_id) { + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + struct skl_plane_wm *wm = + &crtc_state->wm.skl.optimal.planes[plane_id]; + + if (DISPLAY_VER(i915) < 11 && + crtc_state->nv12_planes & BIT(plane_id)) { + skl_check_wm_level(&wm->trans_wm, ddb_y); + } else { + WARN_ON(skl_ddb_entry_size(ddb_y)); + + skl_check_wm_level(&wm->trans_wm, ddb); + } + + skl_check_wm_level(&wm->sagv.wm0, ddb); + skl_check_wm_level(&wm->sagv.trans_wm, ddb); + } + + return 0; +} + +/* + * The max latency should be 257 (max the punit can code is 255 and we add 2us + * for the read latency) and cpp should always be <= 8, so that + * should allow pixel_rate up to ~2 GHz which seems sufficient since max + * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
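Stripped of the driver's 16.16 fixed-point plumbing and the platform-specific +1 rounding adjustments, the two methods implemented below reduce to simple arithmetic: method1 counts blocks fetched during the latency window, method2 counts whole lines crossed during that window. A floating-point sketch with hypothetical plane parameters (pixel rate in kHz, as in crtc_state->pixel_rate):

#include <stdio.h>
#include <math.h>

/* method1: blocks fetched during the latency window */
static double wm_method1(double pixel_rate_khz, double cpp,
             double latency_us, double block_size)
{
    return latency_us * pixel_rate_khz * cpp / (1000.0 * block_size);
}

/* method2: lines crossed during the latency window, in blocks */
static double wm_method2(double pixel_rate_khz, double htotal,
             double latency_us, double blocks_per_line)
{
    return ceil(latency_us * pixel_rate_khz / (htotal * 1000.0)) *
           blocks_per_line;
}

int main(void)
{
    /* hypothetical 4k-ish plane: 594 MHz pixel clock, 4 Bpp, 512 B blocks */
    printf("method1: %.1f blocks\n", wm_method1(594000, 4, 10, 512));
    printf("method2: %.1f blocks\n", wm_method2(594000, 4400, 10, 35));
    return 0;
}

The selection logic below then prefers the line-based method2 whenever the latency covers at least one full line time.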
+ */
+static uint_fixed_16_16_t
+skl_wm_method1(const struct drm_i915_private *i915, u32 pixel_rate,
+           u8 cpp, u32 latency, u32 dbuf_block_size)
+{
+    u32 wm_intermediate_val;
+    uint_fixed_16_16_t ret;
+
+    if (latency == 0)
+        return FP_16_16_MAX;
+
+    wm_intermediate_val = latency * pixel_rate * cpp;
+    ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+
+    if (DISPLAY_VER(i915) >= 10)
+        ret = add_fixed16_u32(ret, 1);
+
+    return ret;
+}
+
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+           uint_fixed_16_16_t plane_blocks_per_line)
+{
+    u32 wm_intermediate_val;
+    uint_fixed_16_16_t ret;
+
+    if (latency == 0)
+        return FP_16_16_MAX;
+
+    wm_intermediate_val = latency * pixel_rate;
+    wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+                       pipe_htotal * 1000);
+    ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
+    return ret;
+}
+
+static uint_fixed_16_16_t
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+{
+    struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+    u32 pixel_rate;
+    u32 crtc_htotal;
+    uint_fixed_16_16_t linetime_us;
+
+    if (!crtc_state->hw.active)
+        return u32_to_fixed16(0);
+
+    pixel_rate = crtc_state->pixel_rate;
+
+    if (drm_WARN_ON(&i915->drm, pixel_rate == 0))
+        return u32_to_fixed16(0);
+
+    crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
+    linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
+
+    return linetime_us;
+}
+
+static int
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+              int width, const struct drm_format_info *format,
+              u64 modifier, unsigned int rotation,
+              u32 plane_pixel_rate, struct skl_wm_params *wp,
+              int color_plane)
+{
+    struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+    struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+    u32 interm_pbpl;
+
+    /* only planar formats have two planes */
+    if (color_plane == 1 &&
+        !intel_format_info_is_yuv_semiplanar(format, modifier)) {
+        drm_dbg_kms(&i915->drm,
+                "Non-planar formats have a single plane\n");
+        return -EINVAL;
+    }
+
+    wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+              modifier == I915_FORMAT_MOD_4_TILED ||
+              modifier == I915_FORMAT_MOD_Yf_TILED ||
+              modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+              modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+    wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+    wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+             modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+    wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
+
+    wp->width = width;
+    if (color_plane == 1 && wp->is_planar)
+        wp->width /= 2;
+
+    wp->cpp = format->cpp[color_plane];
+    wp->plane_pixel_rate = plane_pixel_rate;
+
+    if (DISPLAY_VER(i915) >= 11 &&
+        modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+        wp->dbuf_block_size = 256;
+    else
+        wp->dbuf_block_size = 512;
+
+    if (drm_rotation_90_or_270(rotation)) {
+        switch (wp->cpp) {
+        case 1:
+            wp->y_min_scanlines = 16;
+            break;
+        case 2:
+            wp->y_min_scanlines = 8;
+            break;
+        case 4:
+            wp->y_min_scanlines = 4;
+            break;
+        default:
+            MISSING_CASE(wp->cpp);
+            return -EINVAL;
+        }
+    } else {
+        wp->y_min_scanlines = 4;
+    }
+
+    if (skl_needs_memory_bw_wa(i915))
+        wp->y_min_scanlines *= 2;
+
+    wp->plane_bytes_per_line = wp->width * wp->cpp;
+    if (wp->y_tiled) {
+        interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line *
+                       wp->y_min_scanlines,
+                       wp->dbuf_block_size);
+
+        if (DISPLAY_VER(i915) >= 10)
+            interm_pbpl++;
+
+        wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
+                            wp->y_min_scanlines);
+    } else {
+        interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
+                       wp->dbuf_block_size);
+
+        if (!wp->x_tiled || DISPLAY_VER(i915) >= 10)
+            interm_pbpl++;
+
+        wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
+    }
+
+    wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
+                         wp->plane_blocks_per_line);
+
+    wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+
+    return 0;
+}
+
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state,
+                struct skl_wm_params *wp, int color_plane)
+{
+    const struct drm_framebuffer *fb = plane_state->hw.fb;
+    int width;
+
+    /*
+     * Src coordinates are already rotated by 270 degrees for
+     * the 90/270 degree plane rotation cases (to match the
+     * GTT mapping), hence no need to account for rotation here.
+     */
+    width = drm_rect_width(&plane_state->uapi.src) >> 16;
+
+    return skl_compute_wm_params(crtc_state, width,
+                     fb->format, fb->modifier,
+                     plane_state->hw.rotation,
+                     intel_plane_pixel_rate(crtc_state, plane_state),
+                     wp, color_plane);
+}
+
+static bool skl_wm_has_lines(struct drm_i915_private *i915, int level)
+{
+    if (DISPLAY_VER(i915) >= 10)
+        return true;
+
+    /* The number of lines is ignored for the level 0 watermark. */
+    return level > 0;
+}
+
+static int skl_wm_max_lines(struct drm_i915_private *i915)
+{
+    if (DISPLAY_VER(i915) >= 13)
+        return 255;
+    else
+        return 31;
+}
+
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+                 struct intel_plane *plane,
+                 int level,
+                 unsigned int latency,
+                 const struct skl_wm_params *wp,
+                 const struct skl_wm_level *result_prev,
+                 struct skl_wm_level *result /* out */)
+{
+    struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+    uint_fixed_16_16_t method1, method2;
+    uint_fixed_16_16_t selected_result;
+    u32 blocks, lines, min_ddb_alloc = 0;
+
+    if (latency == 0 ||
+        (use_minimal_wm0_only(crtc_state, plane) && level > 0)) {
+        /* reject it */
+        result->min_ddb_alloc = U16_MAX;
+        return;
+    }
+
+    /*
+     * WaIncreaseLatencyIPCEnabled: kbl,cfl
+     * Display WA #1141: kbl,cfl
+     */
+    if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) &&
+        skl_watermark_ipc_enabled(i915))
+        latency += 4;
+
+    if (skl_needs_memory_bw_wa(i915) && wp->x_tiled)
+        latency += 15;
+
+    method1 = skl_wm_method1(i915, wp->plane_pixel_rate,
+                 wp->cpp, latency, wp->dbuf_block_size);
+    method2 = skl_wm_method2(wp->plane_pixel_rate,
+                 crtc_state->hw.pipe_mode.crtc_htotal,
+                 latency,
+                 wp->plane_blocks_per_line);
+
+    if (wp->y_tiled) {
+        selected_result = max_fixed16(method2, wp->y_tile_minimum);
+    } else {
+        if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
+             wp->dbuf_block_size < 1) &&
+            (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
+            selected_result = method2;
+        } else if (latency >= wp->linetime_us) {
+            if (DISPLAY_VER(i915) == 9)
+                selected_result = min_fixed16(method1, method2);
+            else
+                selected_result = method2;
+        } else {
+            selected_result = method1;
+        }
+    }
+
+    blocks = fixed16_to_u32_round_up(selected_result) + 1;
+    /*
+     * Let's have blocks at minimum equivalent to plane_blocks_per_line,
+     * as there will be at minimum one line for the lines configuration.
+     * This is a workaround for FIFO underruns observed with resolutions
+     * like 4k 60 Hz in single channel DRAM configurations.
+     *
+     * As per Bspec 49325, if the ddb allocation can hold at least
+     * one plane_blocks_per_line, we should have selected method2 in
+     * the above logic.
+     * Assuming that modern versions have enough DBuf, and that method2
+     * guarantees blocks equivalent to at least one line, select
+     * plane_blocks_per_line as the minimum number of blocks.
+     *
+     * TODO: Revisit the logic when we have a better understanding of the
+     * DRAM channels' impact on the level 0 memory latency and the
+     * relevant wm calculations.
+     */
+    if (skl_wm_has_lines(i915, level))
+        blocks = max(blocks,
+                 fixed16_to_u32_round_up(wp->plane_blocks_per_line));
+    lines = div_round_up_fixed16(selected_result,
+                     wp->plane_blocks_per_line);
+
+    if (DISPLAY_VER(i915) == 9) {
+        /* Display WA #1125: skl,bxt,kbl */
+        if (level == 0 && wp->rc_surface)
+            blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+        /* Display WA #1126: skl,bxt,kbl */
+        if (level >= 1 && level <= 7) {
+            if (wp->y_tiled) {
+                blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
+                lines += wp->y_min_scanlines;
+            } else {
+                blocks++;
+            }
+
+            /*
+             * Make sure result blocks for higher latency levels are
+             * at least as high as the level below the current level.
+             * Assumption in DDB algorithm optimization for special
+             * cases. Also covers Display WA #1125 for RC.
+             */
+            if (result_prev->blocks > blocks)
+                blocks = result_prev->blocks;
+        }
+    }
+
+    if (DISPLAY_VER(i915) >= 11) {
+        if (wp->y_tiled) {
+            int extra_lines;
+
+            if (lines % wp->y_min_scanlines == 0)
+                extra_lines = wp->y_min_scanlines;
+            else
+                extra_lines = wp->y_min_scanlines * 2 -
+                    lines % wp->y_min_scanlines;
+
+            min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines,
+                                 wp->plane_blocks_per_line);
+        } else {
+            min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10);
+        }
+    }
+
+    if (!skl_wm_has_lines(i915, level))
+        lines = 0;
+
+    if (lines > skl_wm_max_lines(i915)) {
+        /* reject it */
+        result->min_ddb_alloc = U16_MAX;
+        return;
+    }
+
+    /*
+     * If lines is valid, assume we can use this watermark level
+     * for now. We'll come back and disable it after we calculate the
+     * DDB allocation if it turns out we don't actually have enough
+     * blocks to satisfy it.
+     */
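The y-tiled min_ddb_alloc above pads the computed lines value past the next y_min_scanlines boundary before converting back to blocks, so a full tile row always fits. The padding rule in isolation (hypothetical values):

#include <stdio.h>

/* Pad @lines past the next @y_min boundary, as in the ICL+ y-tiled path. */
static int padded_lines(int lines, int y_min)
{
    int extra;

    if (lines % y_min == 0)
        extra = y_min;
    else
        extra = 2 * y_min - lines % y_min;

    return lines + extra;
}

int main(void)
{
    /* y_min_scanlines = 4: 8 -> 12, 9 -> 16, 10 -> 16, 11 -> 16 */
    for (int l = 8; l <= 11; l++)
        printf("%d -> %d\n", l, padded_lines(l, 4));
    return 0;
}

Note the result always lands strictly past the next boundary (an exact multiple still gains a full y_min), guaranteeing headroom of at least one tile row.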
+    result->blocks = blocks;
+    result->lines = lines;
+    /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */
+    result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1;
+    result->enable = true;
+
+    if (DISPLAY_VER(i915) < 12 && i915->display.sagv.block_time_us)
+        result->can_sagv = latency >= i915->display.sagv.block_time_us;
+}
+
+static void
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
+              struct intel_plane *plane,
+              const struct skl_wm_params *wm_params,
+              struct skl_wm_level *levels)
+{
+    struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+    int level, max_level = ilk_wm_max_level(i915);
+    struct skl_wm_level *result_prev = &levels[0];
+
+    for (level = 0; level <= max_level; level++) {
+        struct skl_wm_level *result = &levels[level];
+        unsigned int latency = i915->display.wm.skl_latency[level];
+
+        skl_compute_plane_wm(crtc_state, plane, level, latency,
+                     wm_params, result_prev, result);
+
+        result_prev = result;
+    }
+}
+
+static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state,
+                struct intel_plane *plane,
+                const struct skl_wm_params *wm_params,
+                struct skl_plane_wm *plane_wm)
+{
+    struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+    struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0;
+    struct skl_wm_level *levels = plane_wm->wm;
+    unsigned int latency = 0;
+
+    if (i915->display.sagv.block_time_us)
+        latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0];
+
+    skl_compute_plane_wm(crtc_state, plane, 0, latency,
+                 wm_params, &levels[0],
+                 sagv_wm);
+}
+
+static void skl_compute_transition_wm(struct drm_i915_private *i915,
+                      struct skl_wm_level *trans_wm,
+                      const struct skl_wm_level *wm0,
+                      const struct skl_wm_params *wp)
+{
+    u16 trans_min, trans_amount, trans_y_tile_min;
+    u16 wm0_blocks, trans_offset, blocks;
+
+    /* Transition WMs don't make any sense if IPC is disabled. */
+    if (!skl_watermark_ipc_enabled(i915))
+        return;
+
+    /*
+     * WaDisableTWM:skl,kbl,cfl,bxt
+     * Transition WMs are not recommended by the HW team for GEN9.
+     */
+    if (DISPLAY_VER(i915) == 9)
+        return;
+
+    if (DISPLAY_VER(i915) >= 11)
+        trans_min = 4;
+    else
+        trans_min = 14;
+
+    /* Display WA #1140: glk,cnl */
+    if (DISPLAY_VER(i915) == 10)
+        trans_amount = 0;
+    else
+        trans_amount = 10; /* This is a configurable amount */
+
+    trans_offset = trans_min + trans_amount;
+
+    /*
+     * The spec asks for Selected Result Blocks for wm0 (the real value),
+     * not Result Blocks (the integer value). Pay attention to the capital
+     * letters. The value wm_l0->blocks is actually Result Blocks, but
+     * since Result Blocks is the ceiling of Selected Result Blocks plus 1,
+     * and since we later will have to get the ceiling of the sum in the
+     * transition watermarks calculation, we can just pretend Selected
+     * Result Blocks is Result Blocks minus 1 and it should work for the
+     * current platforms.
+     */
+    wm0_blocks = wm0->blocks - 1;
+
+    if (wp->y_tiled) {
+        trans_y_tile_min =
+            (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
+        blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset;
+    } else {
+        blocks = wm0_blocks + trans_offset;
+    }
+    blocks++;
+
+    /*
+     * Just assume we can enable the transition watermark. After
+     * computing the DDB we'll come back and disable it if that
+     * assumption turns out to be false.
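In other words, the transition watermark is wm0 converted back to Selected Result Blocks (minus one), floored at two tile rows for y-tiled surfaces, plus the fixed trans_min + trans_amount offset, then rounded back up. A sketch of just that arithmetic (hypothetical block counts):

#include <stdio.h>

/* Transition WM blocks, mirroring skl_compute_transition_wm()'s math. */
static int trans_wm_blocks(int wm0_blocks, int trans_min, int trans_amount,
               int y_tile_minimum /* 0 if not y-tiled */)
{
    int trans_offset = trans_min + trans_amount;
    /* Result Blocks -> Selected Result Blocks */
    int selected = wm0_blocks - 1;
    int blocks;

    if (y_tile_minimum) {
        int floor2 = 2 * y_tile_minimum;

        blocks = (selected > floor2 ? selected : floor2) + trans_offset;
    } else {
        blocks = selected + trans_offset;
    }

    return blocks + 1; /* back to Result Blocks */
}

int main(void)
{
    /* ICL+ linear plane: trans_min = 4, trans_amount = 10 */
    printf("linear: %d\n", trans_wm_blocks(47, 4, 10, 0));
    /* y-tiled plane with a large tile-row minimum */
    printf("y-tiled: %d\n", trans_wm_blocks(47, 4, 10, 60));
    return 0;
}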
+ */ + trans_wm->blocks = blocks; + trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); + trans_wm->enable = true; +} + +static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct intel_plane *plane, int color_plane) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; + struct skl_wm_params wm_params; + int ret; + + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, color_plane); + if (ret) + return ret; + + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); + + skl_compute_transition_wm(i915, &wm->trans_wm, + &wm->wm[0], &wm_params); + + if (DISPLAY_VER(i915) >= 12) { + tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); + + skl_compute_transition_wm(i915, &wm->sagv.trans_wm, + &wm->sagv.wm0, &wm_params); + } + + return 0; +} + +static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct intel_plane *plane) +{ + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; + struct skl_wm_params wm_params; + int ret; + + wm->is_planar = true; + + /* uv plane watermarks must also be validated for NV12/Planar */ + ret = skl_compute_plane_wm_params(crtc_state, plane_state, + &wm_params, 1); + if (ret) + return ret; + + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); + + return 0; +} + +static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int ret; + + memset(wm, 0, sizeof(*wm)); + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 0); + if (ret) + return ret; + + if (fb->format->is_yuv && fb->format->num_planes > 1) { + ret = skl_build_plane_wm_uv(crtc_state, plane_state, + plane); + if (ret) + return ret; + } + + return 0; +} + +static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + int ret; + + /* Watermarks calculated in master */ + if (plane_state->planar_slave) + return 0; + + memset(wm, 0, sizeof(*wm)); + + if (plane_state->planar_linked_plane) { + const struct drm_framebuffer *fb = plane_state->hw.fb; + + drm_WARN_ON(&i915->drm, + !intel_wm_plane_visible(crtc_state, plane_state)); + drm_WARN_ON(&i915->drm, !fb->format->is_yuv || + fb->format->num_planes == 1); + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane_state->planar_linked_plane, 0); + if (ret) + return ret; + + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 1); + if (ret) + return ret; + } else if (intel_wm_plane_visible(crtc_state, plane_state)) { + ret = skl_build_plane_wm_single(crtc_state, plane_state, + plane, 0); + if (ret) + return ret; + } + + return 0; +} + +static int skl_build_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) 
+{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int ret, i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + /* + * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc + * instead but we don't populate that correctly for NV12 Y + * planes so for now hack this. + */ + if (plane->pipe != crtc->pipe) + continue; + + if (DISPLAY_VER(i915) >= 11) + ret = icl_build_plane_wm(crtc_state, plane_state); + else + ret = skl_build_plane_wm(crtc_state, plane_state); + if (ret) + return ret; + } + + crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; + + return 0; +} + +static void skl_ddb_entry_write(struct drm_i915_private *i915, + i915_reg_t reg, + const struct skl_ddb_entry *entry) +{ + if (entry->end) + intel_de_write_fw(i915, reg, + PLANE_BUF_END(entry->end - 1) | + PLANE_BUF_START(entry->start)); + else + intel_de_write_fw(i915, reg, 0); +} + +static void skl_write_wm_level(struct drm_i915_private *i915, + i915_reg_t reg, + const struct skl_wm_level *level) +{ + u32 val = 0; + + if (level->enable) + val |= PLANE_WM_EN; + if (level->ignore_lines) + val |= PLANE_WM_IGNORE_LINES; + val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks); + val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines); + + intel_de_write_fw(i915, reg, val); +} + +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + const struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + + for (level = 0; level <= max_level; level++) + skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level), + skl_plane_wm_level(pipe_wm, plane_id, level)); + + skl_write_wm_level(i915, PLANE_WM_TRANS(pipe, plane_id), + skl_plane_trans_wm(pipe_wm, plane_id)); + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + skl_write_wm_level(i915, PLANE_WM_SAGV(pipe, plane_id), + &wm->sagv.wm0); + skl_write_wm_level(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id), + &wm->sagv.trans_wm); + } + + skl_ddb_entry_write(i915, + PLANE_BUF_CFG(pipe, plane_id), ddb); + + if (DISPLAY_VER(i915) < 11) + skl_ddb_entry_write(i915, + PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y); +} + +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; + const struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + + for (level = 0; level <= max_level; level++) + skl_write_wm_level(i915, CUR_WM(pipe, level), + skl_plane_wm_level(pipe_wm, plane_id, level)); + + skl_write_wm_level(i915, CUR_WM_TRANS(pipe), + skl_plane_trans_wm(pipe_wm, plane_id)); + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; + + skl_write_wm_level(i915, CUR_WM_SAGV(pipe), + &wm->sagv.wm0); + skl_write_wm_level(i915, CUR_WM_SAGV_TRANS(pipe), + 
&wm->sagv.trans_wm); + } + + skl_ddb_entry_write(i915, CUR_BUF_CFG(pipe), ddb); +} + +static bool skl_wm_level_equals(const struct skl_wm_level *l1, + const struct skl_wm_level *l2) +{ + return l1->enable == l2->enable && + l1->ignore_lines == l2->ignore_lines && + l1->lines == l2->lines && + l1->blocks == l2->blocks; +} + +static bool skl_plane_wm_equals(struct drm_i915_private *i915, + const struct skl_plane_wm *wm1, + const struct skl_plane_wm *wm2) +{ + int level, max_level = ilk_wm_max_level(i915); + + for (level = 0; level <= max_level; level++) { + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) + return false; + } + + return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && + skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && + skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); +} + +static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, + const struct skl_ddb_entry *b) +{ + return a->start < b->end && b->start < a->end; +} + +static void skl_ddb_entry_union(struct skl_ddb_entry *a, + const struct skl_ddb_entry *b) +{ + if (a->end && b->end) { + a->start = min(a->start, b->start); + a->end = max(a->end, b->end); + } else if (b->end) { + a->start = b->start; + a->end = b->end; + } +} + +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry *entries, + int num_entries, int ignore_idx) +{ + int i; + + for (i = 0; i < num_entries; i++) { + if (i != ignore_idx && + skl_ddb_entries_overlap(ddb, &entries[i])) + return true; + } + + return false; +} + +static int +skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + struct intel_plane_state *plane_state; + enum plane_id plane_id = plane->id; + + if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id], + &new_crtc_state->wm.skl.plane_ddb[plane_id]) && + skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], + &new_crtc_state->wm.skl.plane_ddb_y[plane_id])) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane_id); + } + + return 0; +} + +static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) +{ + struct drm_i915_private *i915 = to_i915(dbuf_state->base.state->base.dev); + u8 enabled_slices; + enum pipe pipe; + + /* + * FIXME: For now we always enable slice S1 as per + * the Bspec display initialization sequence. 
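skl_ddb_entries_overlap() above treats DDB entries as half-open [start, end) intervals, which makes back-to-back allocations non-overlapping by construction. The invariant is easy to sanity-check standalone:

#include <assert.h>
#include <stdio.h>

struct entry { int start, end; }; /* half-open: [start, end) */

static int overlaps(struct entry a, struct entry b)
{
    return a.start < b.end && b.start < a.end;
}

int main(void)
{
    struct entry a = { 0, 512 }, b = { 512, 1024 }, c = { 500, 600 };

    assert(!overlaps(a, b)); /* touching ranges do not overlap */
    assert(overlaps(a, c) && overlaps(b, c));
    printf("ok\n");
    return 0;
}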
+ */ + enabled_slices = BIT(DBUF_S1); + + for_each_pipe(i915, pipe) + enabled_slices |= dbuf_state->slices[pipe]; + + return enabled_slices; +} + +static int +skl_compute_ddb(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *old_dbuf_state; + struct intel_dbuf_state *new_dbuf_state = NULL; + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state *new_crtc_state; + struct intel_crtc *crtc; + int ret, i; + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + new_dbuf_state = intel_atomic_get_dbuf_state(state); + if (IS_ERR(new_dbuf_state)) + return PTR_ERR(new_dbuf_state); + + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + break; + } + + if (!new_dbuf_state) + return 0; + + new_dbuf_state->active_pipes = + intel_calc_active_pipes(state, old_dbuf_state->active_pipes); + + if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + if (HAS_MBUS_JOINING(i915)) + new_dbuf_state->joined_mbus = + adlp_check_mbus_joined(new_dbuf_state->active_pipes); + + for_each_intel_crtc(&i915->drm, crtc) { + enum pipe pipe = crtc->pipe; + + new_dbuf_state->slices[pipe] = + skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, + new_dbuf_state->joined_mbus); + + if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) + continue; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); + + if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || + old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { + ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); + if (ret) + return ret; + + if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { + /* TODO: Implement vblank synchronized MBUS joining changes */ + ret = intel_modeset_all_pipes(state); + if (ret) + return ret; + } + + drm_dbg_kms(&i915->drm, + "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? %s->%s\n", + old_dbuf_state->enabled_slices, + new_dbuf_state->enabled_slices, + INTEL_INFO(i915)->display.dbuf.slice_mask, + str_yes_no(old_dbuf_state->joined_mbus), + str_yes_no(new_dbuf_state->joined_mbus)); + } + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + + new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state); + + if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe]) + continue; + + ret = intel_atomic_lock_global_state(&new_dbuf_state->base); + if (ret) + return ret; + } + + for_each_intel_crtc(&i915->drm, crtc) { + ret = skl_crtc_allocate_ddb(state, crtc); + if (ret) + return ret; + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = skl_crtc_allocate_plane_ddb(state, crtc); + if (ret) + return ret; + + ret = skl_ddb_add_affected_planes(old_crtc_state, + new_crtc_state); + if (ret) + return ret; + } + + return 0; +} + +static char enast(bool enable) +{ + return enable ? 
'*' : ' '; +} + +static void +skl_print_wm_changes(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *old_crtc_state; + const struct intel_crtc_state *new_crtc_state; + struct intel_plane *plane; + struct intel_crtc *crtc; + int i; + + if (!drm_debug_enabled(DRM_UT_KMS)) + return; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; + + old_pipe_wm = &old_crtc_state->wm.skl.optimal; + new_pipe_wm = &new_crtc_state->wm.skl.optimal; + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_ddb_entry *old, *new; + + old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; + new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; + + if (skl_ddb_entry_equal(old, new)) + continue; + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + } + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_plane_wm *old_wm, *new_wm; + + old_wm = &old_pipe_wm->planes[plane_id]; + new_wm = &new_pipe_wm->planes[plane_id]; + + if (skl_plane_wm_equals(i915, old_wm, new_wm)) + continue; + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), + enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), + enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), + enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), + enast(old_wm->trans_wm.enable), + enast(old_wm->sagv.wm0.enable), + enast(old_wm->sagv.trans_wm.enable), + enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), + enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), + enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), + enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), + enast(new_wm->trans_wm.enable), + enast(new_wm->sagv.wm0.enable), + enast(new_wm->sagv.trans_wm.enable)); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, + enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, + enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, + enast(new_wm->wm[4].ignore_lines), 
new_wm->wm[4].lines, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, + enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, + enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].blocks, old_wm->wm[1].blocks, + old_wm->wm[2].blocks, old_wm->wm[3].blocks, + old_wm->wm[4].blocks, old_wm->wm[5].blocks, + old_wm->wm[6].blocks, old_wm->wm[7].blocks, + old_wm->trans_wm.blocks, + old_wm->sagv.wm0.blocks, + old_wm->sagv.trans_wm.blocks, + new_wm->wm[0].blocks, new_wm->wm[1].blocks, + new_wm->wm[2].blocks, new_wm->wm[3].blocks, + new_wm->wm[4].blocks, new_wm->wm[5].blocks, + new_wm->wm[6].blocks, new_wm->wm[7].blocks, + new_wm->trans_wm.blocks, + new_wm->sagv.wm0.blocks, + new_wm->sagv.trans_wm.blocks); + + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + old_wm->sagv.wm0.min_ddb_alloc, + old_wm->sagv.trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc, + new_wm->sagv.wm0.min_ddb_alloc, + new_wm->sagv.trans_wm.min_ddb_alloc); + } + } +} + +static bool skl_plane_selected_wm_equals(struct intel_plane *plane, + const struct skl_pipe_wm *old_pipe_wm, + const struct skl_pipe_wm *new_pipe_wm) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + int level, max_level = ilk_wm_max_level(i915); + + for (level = 0; level <= max_level; level++) { + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), + skl_plane_wm_level(new_pipe_wm, plane->id, level))) + return false; + } + + if (HAS_HW_SAGV_WM(i915)) { + const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; + const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; + + if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || + !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) + return false; + } + + return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), + skl_plane_trans_wm(new_pipe_wm, plane->id)); +} + +/* + * To make sure the cursor watermark registers are always consistent + * with our computed state the following scenario needs special + * treatment: + * + * 1. enable cursor + * 2. move cursor entirely offscreen + * 3. disable cursor + * + * Step 2. does call .disable_plane() but does not zero the watermarks + * (since we consider an offscreen cursor still active for the purposes + * of watermarks). Step 3. 
would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+                      struct intel_crtc *crtc)
+{
+    struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+    const struct intel_crtc_state *old_crtc_state =
+        intel_atomic_get_old_crtc_state(state, crtc);
+    struct intel_crtc_state *new_crtc_state =
+        intel_atomic_get_new_crtc_state(state, crtc);
+    struct intel_plane *plane;
+
+    for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) {
+        struct intel_plane_state *plane_state;
+        enum plane_id plane_id = plane->id;
+
+        /*
+         * Force a full wm update for every plane on modeset.
+         * Required because the reset value of the wm registers
+         * is non-zero, whereas we want all disabled planes to
+         * have zero watermarks. So if we turn off the relevant
+         * power well the hardware state will go out of sync
+         * with the software state.
+         */
+        if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) &&
+            skl_plane_selected_wm_equals(plane,
+                         &old_crtc_state->wm.skl.optimal,
+                         &new_crtc_state->wm.skl.optimal))
+            continue;
+
+        plane_state = intel_atomic_get_plane_state(state, plane);
+        if (IS_ERR(plane_state))
+            return PTR_ERR(plane_state);
+
+        new_crtc_state->update_planes |= BIT(plane_id);
+    }
+
+    return 0;
+}
+
+static int
+skl_compute_wm(struct intel_atomic_state *state)
+{
+    struct intel_crtc *crtc;
+    struct intel_crtc_state *new_crtc_state;
+    int ret, i;
+
+    for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+        ret = skl_build_pipe_wm(state, crtc);
+        if (ret)
+            return ret;
+    }
+
+    ret = skl_compute_ddb(state);
+    if (ret)
+        return ret;
+
+    ret = intel_compute_sagv_mask(state);
+    if (ret)
+        return ret;
+
+    /*
+     * skl_compute_ddb() will have adjusted the final watermarks
+     * based on how much ddb is available. Now we can actually
+     * check if the final watermarks changed.
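This optimistic-then-clamp pattern is what skl_check_wm_level() implements: every level is first marked enabled, and any level whose min_ddb_alloc no longer fits the plane's final allocation is zeroed out afterwards. Reduced to its essentials (hypothetical sizes):

#include <stdio.h>

struct wm_level { int enable, blocks, min_ddb_alloc; };

/* Disable a level that does not fit its plane's final DDB allocation. */
static void check_wm_level(struct wm_level *wm, int ddb_size)
{
    if (wm->min_ddb_alloc > ddb_size)
        *wm = (struct wm_level){ 0 };
}

int main(void)
{
    struct wm_level levels[] = {
        { 1, 40, 45 }, { 1, 80, 90 }, { 1, 160, 170 },
    };
    int ddb_size = 100; /* final per-plane allocation */

    for (int i = 0; i < 3; i++) {
        check_wm_level(&levels[i], ddb_size);
        printf("WM%d: %s\n", i, levels[i].enable ? "enabled" : "disabled");
    }
    return 0;
}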
+ */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + ret = skl_wm_add_affected_planes(state, crtc); + if (ret) + return ret; + } + + skl_print_wm_changes(state); + + return 0; +} + +static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) +{ + level->enable = val & PLANE_WM_EN; + level->ignore_lines = val & PLANE_WM_IGNORE_LINES; + level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val); + level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val); +} + +static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, + struct skl_pipe_wm *out) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + int level, max_level; + enum plane_id plane_id; + u32 val; + + max_level = ilk_wm_max_level(i915); + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_plane_wm *wm = &out->planes[plane_id]; + + for (level = 0; level <= max_level; level++) { + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level)); + else + val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level)); + + skl_wm_level_from_reg_val(val, &wm->wm[level]); + } + + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe)); + + skl_wm_level_from_reg_val(val, &wm->trans_wm); + + if (HAS_HW_SAGV_WM(i915)) { + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, + PLANE_WM_SAGV(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, + CUR_WM_SAGV(pipe)); + + skl_wm_level_from_reg_val(val, &wm->sagv.wm0); + + if (plane_id != PLANE_CURSOR) + val = intel_uncore_read(&i915->uncore, + PLANE_WM_SAGV_TRANS(pipe, plane_id)); + else + val = intel_uncore_read(&i915->uncore, + CUR_WM_SAGV_TRANS(pipe)); + + skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); + } else if (DISPLAY_VER(i915) >= 12) { + wm->sagv.wm0 = wm->wm[0]; + wm->sagv.trans_wm = wm->trans_wm; + } + } +} + +void skl_wm_get_hw_state(struct drm_i915_private *i915) +{ + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct intel_crtc *crtc; + + if (HAS_MBUS_JOINING(i915)) + dbuf_state->joined_mbus = intel_de_read(i915, MBUS_CTL) & MBUS_JOIN; + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + unsigned int mbus_offset; + enum plane_id plane_id; + u8 slices; + + memset(&crtc_state->wm.skl.optimal, 0, + sizeof(crtc_state->wm.skl.optimal)); + if (crtc_state->hw.active) + skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); + crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; + + memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe])); + + for_each_plane_id_on_crtc(crtc, plane_id) { + struct skl_ddb_entry *ddb = + &crtc_state->wm.skl.plane_ddb[plane_id]; + struct skl_ddb_entry *ddb_y = + &crtc_state->wm.skl.plane_ddb_y[plane_id]; + + if (!crtc_state->hw.active) + continue; + + skl_ddb_get_hw_plane_state(i915, crtc->pipe, + plane_id, ddb, ddb_y); + + skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); + skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); + } + + dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); + + /* + * Used for checking overlaps, so we need absolute + * offsets instead of MBUS relative offsets. 
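The rebasing is just absolute = offset-of-first-enabled-slice + relative. A sketch under the assumption of a hypothetical fixed per-slice size (the real offset comes from mbus_ddb_offset(), which accounts for the platform's actual slice layout):

#include <stdio.h>

#define SLICE_SIZE 1024 /* hypothetical blocks per DBuf slice */

/* Offset of the lowest slice in @slice_mask, in absolute DDB blocks. */
static int mbus_offset(unsigned int slice_mask)
{
    int slice = 0;

    while (slice_mask && !(slice_mask & 1)) {
        slice_mask >>= 1;
        slice++;
    }
    return slice * SLICE_SIZE;
}

int main(void)
{
    /* pipe using slices S3|S4 (mask 0b1100), range 0-511 within them */
    int off = mbus_offset(0xc);

    printf("absolute ddb: %d-%d\n", off + 0, off + 511);
    return 0;
}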
+ */ + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + mbus_offset = mbus_ddb_offset(i915, slices); + crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; + crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; + + /* The slices actually used by the planes on the pipe */ + dbuf_state->slices[pipe] = + skl_ddb_dbuf_slice_mask(i915, &crtc_state->wm.skl.ddb); + + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", + crtc->base.base.id, crtc->base.name, + dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start, + dbuf_state->ddb[pipe].end, dbuf_state->active_pipes, + str_yes_no(dbuf_state->joined_mbus)); + } + + dbuf_state->enabled_slices = i915->display.dbuf.enabled_slices; +} + +static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) +{ + const struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + entries[crtc->pipe] = crtc_state->wm.skl.ddb; + } + + for_each_intel_crtc(&i915->drm, crtc) { + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + u8 slices; + + slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, + dbuf_state->joined_mbus); + if (dbuf_state->slices[crtc->pipe] & ~slices) + return true; + + if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, + I915_MAX_PIPES, crtc->pipe)) + return true; + } + + return false; +} + +void skl_wm_sanitize(struct drm_i915_private *i915) +{ + struct intel_crtc *crtc; + + /* + * On TGL/RKL (at least) the BIOS likes to assign the planes + * to the wrong DBUF slices. This will cause an infinite loop + * in skl_commit_modeset_enables() as it can't find a way to + * transition from the old bogus DBUF layout to the new + * proper DBUF layout without DBUF allocation overlaps between + * the planes (which cannot be allowed or else the hardware + * may hang). If we detect a bogus DBUF layout just turn off + * all the planes so that skl_commit_modeset_enables() can + * simply ignore them.
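skl_dbuf_is_misconfigured() flags the BIOS layout as bogus when any two pipes' DDB ranges intersect. A standalone sketch of that interval-overlap test, with skl_ddb_allocation_overlaps() reduced to plain half-open ranges (names and values are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct ddb_entry {
    int start, end; /* half-open block range [start, end) */
};

/* Two allocations collide when neither one ends before the other begins. */
static bool ddb_entries_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
{
    return a->start < b->end && b->start < a->end;
}

/* Check one pipe's allocation against every other pipe's, skipping itself. */
static bool ddb_allocation_overlaps(const struct ddb_entry *entries,
                                    int num_entries, int ignore_idx)
{
    for (int i = 0; i < num_entries; i++) {
        if (i == ignore_idx)
            continue;
        if (ddb_entries_overlap(&entries[ignore_idx], &entries[i]))
            return true;
    }
    return false;
}

int main(void)
{
    /* Pipes A and B collide on blocks 512..639; pipe C is disabled (empty). */
    struct ddb_entry entries[] = { { 0, 640 }, { 512, 1024 }, { 0, 0 } };

    printf("pipe A overlaps: %s\n", ddb_allocation_overlaps(entries, 3, 0) ? "yes" : "no");
    printf("pipe C overlaps: %s\n", ddb_allocation_overlaps(entries, 3, 2) ? "yes" : "no");
    return 0;
}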
+ */ + if (!skl_dbuf_is_misconfigured(i915)) + return; + + drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (plane_state->uapi.visible) + intel_plane_disable_noatomic(crtc, plane); + + drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); + + memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); + } +} + +void intel_wm_state_verify(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct skl_hw_state { + struct skl_ddb_entry ddb[I915_MAX_PLANES]; + struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; + struct skl_pipe_wm wm; + } *hw; + const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; + int level, max_level = ilk_wm_max_level(i915); + struct intel_plane *plane; + u8 hw_enabled_slices; + + if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) + return; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + return; + + skl_pipe_wm_get_hw_state(crtc, &hw->wm); + + skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); + + hw_enabled_slices = intel_enabled_dbuf_slices_mask(i915); + + if (DISPLAY_VER(i915) >= 11 && + hw_enabled_slices != i915->display.dbuf.enabled_slices) + drm_err(&i915->drm, + "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", + i915->display.dbuf.enabled_slices, + hw_enabled_slices); + + for_each_intel_plane_on_crtc(&i915->drm, crtc, plane) { + const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + const struct skl_wm_level *hw_wm_level, *sw_wm_level; + + /* Watermarks */ + for (level = 0; level <= max_level; level++) { + hw_wm_level = &hw->wm.planes[plane->id].wm[level]; + sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); + + if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) + continue; + + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, level, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].trans_wm; + sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); + + if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; + sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; + + if (HAS_HW_SAGV_WM(i915) && + !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; + sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; + + if (HAS_HW_SAGV_WM(i915) && + !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { + drm_err(&i915->drm, + 
"[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + plane->base.base.id, plane->base.name, + sw_wm_level->enable, + sw_wm_level->blocks, + sw_wm_level->lines, + hw_wm_level->enable, + hw_wm_level->blocks, + hw_wm_level->lines); + } + + /* DDB */ + hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; + + if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { + drm_err(&i915->drm, + "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", + plane->base.base.id, plane->base.name, + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); + } + } + + kfree(hw); +} + +bool skl_watermark_ipc_enabled(struct drm_i915_private *i915) +{ + return i915->display.wm.ipc_enabled; +} + +void skl_watermark_ipc_update(struct drm_i915_private *i915) +{ + if (!HAS_IPC(i915)) + return; + + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE, + skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0); +} + +static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915) +{ + /* Display WA #0477 WaDisableIPC: skl */ + if (IS_SKYLAKE(i915)) + return false; + + /* Display WA #1141: SKL:all KBL:all CFL */ + if (IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) + return i915->dram_info.symmetric_memory; + + return true; +} + +void skl_watermark_ipc_init(struct drm_i915_private *i915) +{ + if (!HAS_IPC(i915)) + return; + + i915->display.wm.ipc_enabled = skl_watermark_ipc_can_enable(i915); + + skl_watermark_ipc_update(i915); +} + +static void +adjust_wm_latency(struct drm_i915_private *i915, + u16 wm[], int max_level, int read_latency) +{ + bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; + int i, level; + + /* + * If a level n (n > 1) has a 0us latency, all levels m (m >= n) + * need to be disabled. We make sure to sanitize the values out + * of the punit to satisfy this requirement. + */ + for (level = 1; level <= max_level; level++) { + if (wm[level] == 0) { + for (i = level + 1; i <= max_level; i++) + wm[i] = 0; + + max_level = level - 1; + break; + } + } + + /* + * WaWmMemoryReadLatency + * + * punit doesn't take into account the read latency so we need + * to add proper adjustement to each valid level we retrieve + * from the punit when level 0 response data is 0us. + */ + if (wm[0] == 0) { + for (level = 0; level <= max_level; level++) + wm[level] += read_latency; + } + + /* + * WA Level-0 adjustment for 16GB DIMMs: SKL+ + * If we could not get dimm info enable this WA to prevent from + * any underrun. If not able to get Dimm info assume 16GB dimm + * to avoid any underrun. 
+ */ + if (wm_lv_0_adjust_needed) + wm[0] += 1; +} + +static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + struct intel_uncore *uncore = &i915->uncore; + int max_level = ilk_wm_max_level(i915); + u32 val; + + val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1); + wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3); + wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5); + wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); + wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); + + adjust_wm_latency(i915, wm, max_level, 6); +} + +static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + int max_level = ilk_wm_max_level(i915); + int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; + int mult = IS_DG2(i915) ? 2 : 1; + u32 val; + int ret; + + /* read the first set of memory latencies[0:3] */ + val = 0; /* data0 to be programmed to 0 for first set */ + ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + if (ret) { + drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + return; + } + + wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; + wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; + wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; + wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; + + /* read the second set of memory latencies[4:7] */ + val = 1; /* data0 to be programmed to 1 for second set */ + ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + if (ret) { + drm_err(&i915->drm, "SKL Mailbox read error = %d\n", ret); + return; + } + + wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult; + wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult; + wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; + wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; + + adjust_wm_latency(i915, wm, max_level, read_latency); +} + +static void skl_setup_wm_latency(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 14) + mtl_read_wm_latency(i915, i915->display.wm.skl_latency); + else + skl_read_wm_latency(i915, i915->display.wm.skl_latency); + + intel_print_wm_latency(i915, "Gen9 Plane", i915->display.wm.skl_latency); +} + +static const struct intel_wm_funcs skl_wm_funcs = { + .compute_global_watermarks = skl_compute_wm, +}; + +void skl_wm_init(struct drm_i915_private *i915) +{ + intel_sagv_init(i915); + + skl_setup_wm_latency(i915); + + i915->display.funcs.wm = &skl_wm_funcs; +} + +static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) +{ + struct intel_dbuf_state *dbuf_state; + + dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); + if (!dbuf_state) + return NULL; + + return &dbuf_state->base; +} + +static void intel_dbuf_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) +{ + kfree(state); +} + +static const struct intel_global_state_funcs intel_dbuf_funcs = { + .atomic_duplicate_state = intel_dbuf_duplicate_state, + .atomic_destroy_state = intel_dbuf_destroy_state, +}; + +struct intel_dbuf_state * +intel_atomic_get_dbuf_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct 
intel_global_state *dbuf_state; + + dbuf_state = intel_atomic_get_global_obj_state(state, &i915->display.dbuf.obj); + if (IS_ERR(dbuf_state)) + return ERR_CAST(dbuf_state); + + return to_intel_dbuf_state(dbuf_state); +} + +int intel_dbuf_init(struct drm_i915_private *i915) +{ + struct intel_dbuf_state *dbuf_state; + + dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); + if (!dbuf_state) + return -ENOMEM; + + intel_atomic_global_obj_init(i915, &i915->display.dbuf.obj, + &dbuf_state->base, &intel_dbuf_funcs); + + return 0; +} + +/* + * Configure MBUS_CTL and the DBUF_CTL_S register of each slice to the + * join_mbus state before updating the request state of all DBUF slices. + */ +static void update_mbus_pre_enable(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + u32 mbus_ctl, dbuf_min_tracker_val; + enum dbuf_slice slice; + const struct intel_dbuf_state *dbuf_state = + intel_atomic_get_new_dbuf_state(state); + + if (!HAS_MBUS_JOINING(i915)) + return; + + /* + * TODO: Implement vblank synchronized MBUS joining changes. + * Must be properly coordinated with dbuf reprogramming. + */ + if (dbuf_state->joined_mbus) { + mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | + MBUS_JOIN_PIPE_SELECT_NONE; + dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); + } else { + mbus_ctl = MBUS_HASHING_MODE_2x2 | + MBUS_JOIN_PIPE_SELECT_NONE; + dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); + } + + intel_de_rmw(i915, MBUS_CTL, + MBUS_HASHING_MODE_MASK | MBUS_JOIN | + MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); + + for_each_dbuf_slice(i915, slice) + intel_de_rmw(i915, DBUF_CTL_S(slice), + DBUF_MIN_TRACKER_STATE_SERVICE_MASK, + dbuf_min_tracker_val); +} + +void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state || + (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && + new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + update_mbus_pre_enable(state); + gen9_dbuf_slices_update(i915, + old_dbuf_state->enabled_slices | + new_dbuf_state->enabled_slices); +} + +void intel_dbuf_post_plane_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_dbuf_state *new_dbuf_state = + intel_atomic_get_new_dbuf_state(state); + const struct intel_dbuf_state *old_dbuf_state = + intel_atomic_get_old_dbuf_state(state); + + if (!new_dbuf_state || + (new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices && + new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus)) + return; + + WARN_ON(!new_dbuf_state->base.changed); + + gen9_dbuf_slices_update(i915, + new_dbuf_state->enabled_slices); +} + +static bool xelpdp_is_only_pipe_per_dbuf_bank(enum pipe pipe, u8 active_pipes) +{ + switch (pipe) { + case PIPE_A: + return !(active_pipes & BIT(PIPE_D)); + case PIPE_D: + return !(active_pipes & BIT(PIPE_A)); + case PIPE_B: + return !(active_pipes & BIT(PIPE_C)); + case PIPE_C: + return !(active_pipes & BIT(PIPE_B)); + default: /* to suppress compiler warning */ + MISSING_CASE(pipe); + break; + } + + return false; +} + +void intel_mbus_dbox_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct
intel_dbuf_state *new_dbuf_state, *old_dbuf_state; + const struct intel_crtc_state *new_crtc_state; + const struct intel_crtc *crtc; + u32 val = 0; + int i; + + if (DISPLAY_VER(i915) < 11) + return; + + new_dbuf_state = intel_atomic_get_new_dbuf_state(state); + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + if (!new_dbuf_state || + (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && + new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) + return; + + if (DISPLAY_VER(i915) >= 14) + val |= MBUS_DBOX_I_CREDIT(2); + + if (DISPLAY_VER(i915) >= 12) { + val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); + val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); + val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; + } + + if (DISPLAY_VER(i915) >= 14) + val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(12) : + MBUS_DBOX_A_CREDIT(8); + else if (IS_ALDERLAKE_P(i915)) + /* Wa_22010947358:adl-p */ + val |= new_dbuf_state->joined_mbus ? MBUS_DBOX_A_CREDIT(6) : + MBUS_DBOX_A_CREDIT(4); + else + val |= MBUS_DBOX_A_CREDIT(2); + + if (DISPLAY_VER(i915) >= 14) { + val |= MBUS_DBOX_B_CREDIT(0xA); + } else if (IS_ALDERLAKE_P(i915)) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(8); + } else if (DISPLAY_VER(i915) >= 12) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(12); + } else { + val |= MBUS_DBOX_BW_CREDIT(1); + val |= MBUS_DBOX_B_CREDIT(8); + } + + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + u32 pipe_val = val; + + if (!new_crtc_state->hw.active) + continue; + + if (DISPLAY_VER(i915) >= 14) { + if (xelpdp_is_only_pipe_per_dbuf_bank(crtc->pipe, + new_dbuf_state->active_pipes)) + pipe_val |= MBUS_DBOX_BW_8CREDITS_MTL; + else + pipe_val |= MBUS_DBOX_BW_4CREDITS_MTL; + } + + intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), pipe_val); + } +} + +static int skl_watermark_ipc_status_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *i915 = m->private; + + seq_printf(m, "Isochronous Priority Control: %s\n", + str_yes_no(skl_watermark_ipc_enabled(i915))); + return 0; +} + +static int skl_watermark_ipc_status_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *i915 = inode->i_private; + + return single_open(file, skl_watermark_ipc_status_show, i915); +} + +static ssize_t skl_watermark_ipc_status_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *i915 = m->private; + intel_wakeref_t wakeref; + bool enable; + int ret; + + ret = kstrtobool_from_user(ubuf, len, &enable); + if (ret < 0) + return ret; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (!skl_watermark_ipc_enabled(i915) && enable) + drm_info(&i915->drm, + "Enabling IPC: WM will be proper only after next commit\n"); + i915->display.wm.ipc_enabled = enable; + skl_watermark_ipc_update(i915); + } + + return len; +} + +static const struct file_operations skl_watermark_ipc_status_fops = { + .owner = THIS_MODULE, + .open = skl_watermark_ipc_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = skl_watermark_ipc_status_write +}; + +void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + if (!HAS_IPC(i915)) + return; + + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, + &skl_watermark_ipc_status_fops); +} diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h new 
file mode 100644 index 000000000000..7a5a4e67cd73 --- /dev/null +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __SKL_WATERMARK_H__ +#define __SKL_WATERMARK_H__ + +#include <linux/types.h> + +#include "intel_display.h" +#include "intel_global_state.h" +#include "intel_pm_types.h" + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_bw_state; +struct intel_crtc; +struct intel_crtc_state; +struct intel_plane; + +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915); + +void intel_sagv_pre_plane_update(struct intel_atomic_state *state); +void intel_sagv_post_plane_update(struct intel_atomic_state *state); +bool intel_can_enable_sagv(struct drm_i915_private *i915, + const struct intel_bw_state *bw_state); + +u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *i915, + const struct skl_ddb_entry *entry); + +void skl_write_plane_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); +void skl_write_cursor_wm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); + +bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, + const struct skl_ddb_entry *entries, + int num_entries, int ignore_idx); + +void skl_wm_get_hw_state(struct drm_i915_private *i915); +void skl_wm_sanitize(struct drm_i915_private *i915); + +void intel_wm_state_verify(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state); + +void skl_watermark_ipc_init(struct drm_i915_private *i915); +void skl_watermark_ipc_update(struct drm_i915_private *i915); +bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); +void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915); + +void skl_wm_init(struct drm_i915_private *i915); + +struct intel_dbuf_state { + struct intel_global_state base; + + struct skl_ddb_entry ddb[I915_MAX_PIPES]; + unsigned int weight[I915_MAX_PIPES]; + u8 slices[I915_MAX_PIPES]; + u8 enabled_slices; + u8 active_pipes; + bool joined_mbus; +}; + +struct intel_dbuf_state * +intel_atomic_get_dbuf_state(struct intel_atomic_state *state); + +#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base) +#define intel_atomic_get_old_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) +#define intel_atomic_get_new_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.dbuf.obj)) + +int intel_dbuf_init(struct drm_i915_private *i915); +void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); +void intel_dbuf_post_plane_update(struct intel_atomic_state *state); +void intel_mbus_dbox_update(struct intel_atomic_state *state); + +#endif /* __SKL_WATERMARK_H__ */ + diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index b9b1fed99874..b3f5ca280ef2 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -822,9 +822,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, u32 val; /* Disable DPOunit clock gating, can stall pipe */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val |= DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); } if (!IS_GEMINILAKE(dev_priv)) @@ -998,9 +998,9 @@ static void 
intel_dsi_post_disable(struct intel_atomic_state *state, vlv_dsi_pll_disable(encoder); - val = intel_de_read(dev_priv, DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); } /* Assert reset */ @@ -1277,13 +1277,12 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pclk = vlv_dsi_get_pclk(encoder, pipe_config); } - if (intel_dsi->dual_link) - pclk *= 2; + pipe_config->port_clock = pclk; - if (pclk) { - pipe_config->hw.adjusted_mode.crtc_clock = pclk; - pipe_config->port_clock = pclk; - } + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + pipe_config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + pipe_config->hw.adjusted_mode.crtc_clock *= 2; } /* return txclkesc cycles in terms of divider and duration in us */ @@ -1872,9 +1871,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) return; if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->mipi_mmio_base = BXT_MIPI_BASE; + dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE; else - dev_priv->mipi_mmio_base = VLV_MIPI_BASE; + dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) @@ -1933,8 +1932,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports; - intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports; + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; + + if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. */ for_each_dsi_port(port, intel_dsi->ports) { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 5894b0138343..af7402127cd9 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -113,6 +113,61 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, return 0; } +static int vlv_dsi_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 dsi_clock; + u32 pll_ctl, pll_div; + u32 m = 0, p = 0, n; + int refclk = IS_CHERRYVIEW(dev_priv) ? 
100000 : 25000; + int i; + + pll_ctl = config->dsi_pll.ctrl; + pll_div = config->dsi_pll.div; + + /* mask out other bits and extract the P1 divisor */ + pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; + pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); + + /* N1 divisor */ + n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; + n = 1 << n; /* register has log2(N1) */ + + /* mask out the other bits and extract the M1 divisor */ + pll_div &= DSI_PLL_M1_DIV_MASK; + pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; + + while (pll_ctl) { + pll_ctl = pll_ctl >> 1; + p++; + } + p--; + + if (!p) { + drm_err(&dev_priv->drm, "wrong P1 divisor\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { + if (lfsr_converts[i] == pll_div) + break; + } + + if (i == ARRAY_SIZE(lfsr_converts)) { + drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); + return 0; + } + + m = i + 62; + + dsi_clock = (m * refclk) / (p * n); + + return DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); +} + /* * XXX: The muxing and gating is hard coded for now. Need to add support for * sharing PLLs with two DSI outputs. @@ -122,8 +177,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - int ret; - u32 dsi_clk; + int pclk, dsi_clk, ret; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); @@ -145,6 +199,14 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", config->dsi_pll.div, config->dsi_pll.ctrl); + pclk = vlv_dsi_pclk(encoder, config); + config->port_clock = pclk; + + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + config->hw.adjusted_mode.crtc_clock *= 2; + return 0; } @@ -262,13 +324,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); - u32 dsi_clock, pclk; u32 pll_ctl, pll_div; - u32 m = 0, p = 0, n; - int refclk = IS_CHERRYVIEW(dev_priv) ? 
100000 : 25000; - int i; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -280,65 +336,31 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK; config->dsi_pll.div = pll_div; - /* mask out other bits and extract the P1 divisor */ - pll_ctl &= DSI_PLL_P1_POST_DIV_MASK; - pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2); - - /* N1 divisor */ - n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT; - n = 1 << n; /* register has log2(N1) */ - - /* mask out the other bits and extract the M1 divisor */ - pll_div &= DSI_PLL_M1_DIV_MASK; - pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; - - while (pll_ctl) { - pll_ctl = pll_ctl >> 1; - p++; - } - p--; - - if (!p) { - drm_err(&dev_priv->drm, "wrong P1 divisor\n"); - return 0; - } - - for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) { - if (lfsr_converts[i] == pll_div) - break; - } - - if (i == ARRAY_SIZE(lfsr_converts)) { - drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); - return 0; - } - - m = i + 62; + return vlv_dsi_pclk(encoder, config); +} - dsi_clock = (m * refclk) / (p * n); +static int bxt_dsi_pclk(struct intel_encoder *encoder, + const struct intel_crtc_state *config) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 dsi_ratio, dsi_clk; - pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp); + dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; + dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - return pclk; + return DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); } u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - u32 pclk; - u32 dsi_clk; - u32 dsi_ratio; - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 pclk; config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); - dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; - - dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; - - pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); + pclk = bxt_dsi_pclk(encoder, config); drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); return pclk; @@ -463,6 +485,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; + int pclk; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); @@ -502,6 +525,14 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, if (IS_BROXTON(dev_priv) && dsi_ratio <= 50) config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; + pclk = bxt_dsi_pclk(encoder, config); + config->port_clock = pclk; + + /* FIXME definitely not right for burst/cmd mode/pixel overlap */ + config->hw.adjusted_mode.crtc_clock = pclk; + if (intel_dsi->dual_link) + config->hw.adjusted_mode.crtc_clock *= 2; + return 0; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h index 356e51515346..e065b8f2ee08 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_regs.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_regs.h @@ -11,6 +11,8 @@ #define VLV_MIPI_BASE VLV_DISPLAY_BASE #define BXT_MIPI_BASE 0x60000 +#define _MIPI_MMIO_BASE(__i915) ((__i915)->display.dsi.mmio_base) + #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? 
a : c) /* ports A and C only */ #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) @@ -96,8 +98,8 @@ /* MIPI DSI Controller and D-PHY registers */ -#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) -#define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) +#define _MIPIA_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb000) +#define _MIPIC_DEVICE_READY (_MIPI_MMIO_BASE(dev_priv) + 0xb800) #define MIPI_DEVICE_READY(port) _MMIO_MIPI(port, _MIPIA_DEVICE_READY, _MIPIC_DEVICE_READY) #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ #define ULPS_STATE_MASK (3 << 1) @@ -106,11 +108,11 @@ #define ULPS_STATE_NORMAL_OPERATION (0 << 1) #define DEVICE_READY (1 << 0) -#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) -#define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) +#define _MIPIA_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb004) +#define _MIPIC_INTR_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb804) #define MIPI_INTR_STAT(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT, _MIPIC_INTR_STAT) -#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) -#define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) +#define _MIPIA_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb008) +#define _MIPIC_INTR_EN (_MIPI_MMIO_BASE(dev_priv) + 0xb808) #define MIPI_INTR_EN(port) _MMIO_MIPI(port, _MIPIA_INTR_EN, _MIPIC_INTR_EN) #define TEARING_EFFECT (1 << 31) #define SPL_PKT_SENT_INTERRUPT (1 << 30) @@ -145,8 +147,8 @@ #define RXSOT_SYNC_ERROR (1 << 1) #define RXSOT_ERROR (1 << 0) -#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) -#define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) +#define _MIPIA_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb00c) +#define _MIPIC_DSI_FUNC_PRG (_MIPI_MMIO_BASE(dev_priv) + 0xb80c) #define MIPI_DSI_FUNC_PRG(port) _MMIO_MIPI(port, _MIPIA_DSI_FUNC_PRG, _MIPIC_DSI_FUNC_PRG) #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) #define CMD_MODE_NOT_SUPPORTED (0 << 13) @@ -168,76 +170,76 @@ #define DATA_LANES_PRG_REG_SHIFT 0 #define DATA_LANES_PRG_REG_MASK (7 << 0) -#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) -#define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) +#define _MIPIA_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb010) +#define _MIPIC_HS_TX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb810) #define MIPI_HS_TX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_HS_TX_TIMEOUT, _MIPIC_HS_TX_TIMEOUT) #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) -#define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) +#define _MIPIA_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb014) +#define _MIPIC_LP_RX_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb814) #define MIPI_LP_RX_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_LP_RX_TIMEOUT, _MIPIC_LP_RX_TIMEOUT) #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff -#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) -#define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) +#define _MIPIA_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb018) +#define _MIPIC_TURN_AROUND_TIMEOUT (_MIPI_MMIO_BASE(dev_priv) + 0xb818) #define MIPI_TURN_AROUND_TIMEOUT(port) _MMIO_MIPI(port, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) #define TURN_AROUND_TIMEOUT_MASK 0x3f -#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) -#define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) +#define _MIPIA_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb01c) +#define 
_MIPIC_DEVICE_RESET_TIMER (_MIPI_MMIO_BASE(dev_priv) + 0xb81c) #define MIPI_DEVICE_RESET_TIMER(port) _MMIO_MIPI(port, _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) #define DEVICE_RESET_TIMER_MASK 0xffff -#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) -#define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) +#define _MIPIA_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb020) +#define _MIPIC_DPI_RESOLUTION (_MIPI_MMIO_BASE(dev_priv) + 0xb820) #define MIPI_DPI_RESOLUTION(port) _MMIO_MIPI(port, _MIPIA_DPI_RESOLUTION, _MIPIC_DPI_RESOLUTION) #define VERTICAL_ADDRESS_SHIFT 16 #define VERTICAL_ADDRESS_MASK (0xffff << 16) #define HORIZONTAL_ADDRESS_SHIFT 0 #define HORIZONTAL_ADDRESS_MASK 0xffff -#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) -#define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) +#define _MIPIA_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb024) +#define _MIPIC_DBI_FIFO_THROTTLE (_MIPI_MMIO_BASE(dev_priv) + 0xb824) #define MIPI_DBI_FIFO_THROTTLE(port) _MMIO_MIPI(port, _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) #define DBI_FIFO_EMPTY_HALF (0 << 0) #define DBI_FIFO_EMPTY_QUARTER (1 << 0) #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) /* regs below are bits 15:0 */ -#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) -#define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) +#define _MIPIA_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb028) +#define _MIPIC_HSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb828) #define MIPI_HSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) -#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) -#define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) +#define _MIPIA_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb02c) +#define _MIPIC_HBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb82c) #define MIPI_HBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HBP_COUNT, _MIPIC_HBP_COUNT) -#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) -#define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) +#define _MIPIA_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb030) +#define _MIPIC_HFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb830) #define MIPI_HFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_HFP_COUNT, _MIPIC_HFP_COUNT) -#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) -#define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) +#define _MIPIA_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb034) +#define _MIPIC_HACTIVE_AREA_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb834) #define MIPI_HACTIVE_AREA_COUNT(port) _MMIO_MIPI(port, _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) -#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) -#define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) +#define _MIPIA_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb038) +#define _MIPIC_VSYNC_PADDING_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb838) #define MIPI_VSYNC_PADDING_COUNT(port) _MMIO_MIPI(port, _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) -#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) -#define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) +#define _MIPIA_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb03c) +#define _MIPIC_VBP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb83c) #define MIPI_VBP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VBP_COUNT, _MIPIC_VBP_COUNT) -#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) -#define _MIPIC_VFP_COUNT 
(dev_priv->mipi_mmio_base + 0xb840) +#define _MIPIA_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb040) +#define _MIPIC_VFP_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb840) #define MIPI_VFP_COUNT(port) _MMIO_MIPI(port, _MIPIA_VFP_COUNT, _MIPIC_VFP_COUNT) -#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) -#define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb044) +#define _MIPIC_HIGH_LOW_SWITCH_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb844) #define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MMIO_MIPI(port, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) -#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) -#define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) +#define _MIPIA_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb048) +#define _MIPIC_DPI_CONTROL (_MIPI_MMIO_BASE(dev_priv) + 0xb848) #define MIPI_DPI_CONTROL(port) _MMIO_MIPI(port, _MIPIA_DPI_CONTROL, _MIPIC_DPI_CONTROL) #define DPI_LP_MODE (1 << 6) #define BACKLIGHT_OFF (1 << 5) @@ -247,27 +249,27 @@ #define TURN_ON (1 << 1) #define SHUTDOWN (1 << 0) -#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) -#define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) +#define _MIPIA_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb04c) +#define _MIPIC_DPI_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb84c) #define MIPI_DPI_DATA(port) _MMIO_MIPI(port, _MIPIA_DPI_DATA, _MIPIC_DPI_DATA) #define COMMAND_BYTE_SHIFT 0 #define COMMAND_BYTE_MASK (0x3f << 0) -#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) -#define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) +#define _MIPIA_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb050) +#define _MIPIC_INIT_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb850) #define MIPI_INIT_COUNT(port) _MMIO_MIPI(port, _MIPIA_INIT_COUNT, _MIPIC_INIT_COUNT) #define MASTER_INIT_TIMER_SHIFT 0 #define MASTER_INIT_TIMER_MASK (0xffff << 0) -#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) -#define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) +#define _MIPIA_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb054) +#define _MIPIC_MAX_RETURN_PKT_SIZE (_MIPI_MMIO_BASE(dev_priv) + 0xb854) #define MIPI_MAX_RETURN_PKT_SIZE(port) _MMIO_MIPI(port, \ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) #define MAX_RETURN_PKT_SIZE_SHIFT 0 #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) -#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) -#define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) +#define _MIPIA_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb058) +#define _MIPIC_VIDEO_MODE_FORMAT (_MIPI_MMIO_BASE(dev_priv) + 0xb858) #define MIPI_VIDEO_MODE_FORMAT(port) _MMIO_MIPI(port, _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) #define DISABLE_VIDEO_BTA (1 << 3) @@ -276,8 +278,8 @@ #define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) #define VIDEO_MODE_BURST (3 << 0) -#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) -#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) +#define _MIPIA_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb05c) +#define _MIPIC_EOT_DISABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb85c) #define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) #define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9) #define BXT_DPHY_DEFEATURE_EN (1 << 8) @@ -290,35 +292,35 @@ #define CLOCKSTOP (1 << 1) #define EOT_DISABLE (1 << 0) -#define _MIPIA_LP_BYTECLK 
(dev_priv->mipi_mmio_base + 0xb060) -#define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) +#define _MIPIA_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb060) +#define _MIPIC_LP_BYTECLK (_MIPI_MMIO_BASE(dev_priv) + 0xb860) #define MIPI_LP_BYTECLK(port) _MMIO_MIPI(port, _MIPIA_LP_BYTECLK, _MIPIC_LP_BYTECLK) #define LP_BYTECLK_SHIFT 0 #define LP_BYTECLK_MASK (0xffff << 0) -#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4) -#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4) +#define _MIPIA_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb0a4) +#define _MIPIC_TLPX_TIME_COUNT (_MIPI_MMIO_BASE(dev_priv) + 0xb8a4) #define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT) -#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098) -#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898) +#define _MIPIA_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb098) +#define _MIPIC_CLK_LANE_TIMING (_MIPI_MMIO_BASE(dev_priv) + 0xb898) #define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING) /* bits 31:0 */ -#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) -#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) +#define _MIPIA_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb064) +#define _MIPIC_LP_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb864) #define MIPI_LP_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_DATA, _MIPIC_LP_GEN_DATA) /* bits 31:0 */ -#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) -#define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) +#define _MIPIA_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb068) +#define _MIPIC_HS_GEN_DATA (_MIPI_MMIO_BASE(dev_priv) + 0xb868) #define MIPI_HS_GEN_DATA(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_DATA, _MIPIC_HS_GEN_DATA) -#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) -#define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) +#define _MIPIA_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb06c) +#define _MIPIC_LP_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb86c) #define MIPI_LP_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_LP_GEN_CTRL, _MIPIC_LP_GEN_CTRL) -#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) -#define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) +#define _MIPIA_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb070) +#define _MIPIC_HS_GEN_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb870) #define MIPI_HS_GEN_CTRL(port) _MMIO_MIPI(port, _MIPIA_HS_GEN_CTRL, _MIPIC_HS_GEN_CTRL) #define LONG_PACKET_WORD_COUNT_SHIFT 8 #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) @@ -330,8 +332,8 @@ #define DATA_TYPE_MASK (0x3f << 0) /* data type values, see include/video/mipi_display.h */ -#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) -#define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) +#define _MIPIA_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb074) +#define _MIPIC_GEN_FIFO_STAT (_MIPI_MMIO_BASE(dev_priv) + 0xb874) #define MIPI_GEN_FIFO_STAT(port) _MMIO_MIPI(port, _MIPIA_GEN_FIFO_STAT, _MIPIC_GEN_FIFO_STAT) #define DPI_FIFO_EMPTY (1 << 28) #define DBI_FIFO_EMPTY (1 << 27) @@ -348,15 +350,15 @@ #define HS_DATA_FIFO_HALF_EMPTY (1 << 1) #define HS_DATA_FIFO_FULL (1 << 0) -#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) -#define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) +#define _MIPIA_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb078) +#define _MIPIC_HS_LS_DBI_ENABLE (_MIPI_MMIO_BASE(dev_priv) + 0xb878) #define 
MIPI_HS_LP_DBI_ENABLE(port) _MMIO_MIPI(port, _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) #define DBI_HS_LP_MODE_MASK (1 << 0) #define DBI_LP_MODE (1 << 0) #define DBI_HS_MODE (0 << 0) -#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) -#define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) +#define _MIPIA_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb080) +#define _MIPIC_DPHY_PARAM (_MIPI_MMIO_BASE(dev_priv) + 0xb880) #define MIPI_DPHY_PARAM(port) _MMIO_MIPI(port, _MIPIA_DPHY_PARAM, _MIPIC_DPHY_PARAM) #define EXIT_ZERO_COUNT_SHIFT 24 #define EXIT_ZERO_COUNT_MASK (0x3f << 24) @@ -367,34 +369,34 @@ #define PREPARE_COUNT_SHIFT 0 #define PREPARE_COUNT_MASK (0x3f << 0) -#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) -#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) +#define _MIPIA_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb084) +#define _MIPIC_DBI_BW_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb884) #define MIPI_DBI_BW_CTRL(port) _MMIO_MIPI(port, _MIPIA_DBI_BW_CTRL, _MIPIC_DBI_BW_CTRL) -#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb088) -#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base + 0xb888) +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb088) +#define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (_MIPI_MMIO_BASE(dev_priv) + 0xb888) #define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) #define LP_HS_SSW_CNT_SHIFT 16 #define LP_HS_SSW_CNT_MASK (0xffff << 16) #define HS_LP_PWR_SW_CNT_SHIFT 0 #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) -#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) -#define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) +#define _MIPIA_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb08c) +#define _MIPIC_STOP_STATE_STALL (_MIPI_MMIO_BASE(dev_priv) + 0xb88c) #define MIPI_STOP_STATE_STALL(port) _MMIO_MIPI(port, _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) #define STOP_STATE_STALL_COUNTER_SHIFT 0 #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) -#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) -#define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) +#define _MIPIA_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb090) +#define _MIPIC_INTR_STAT_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb890) #define MIPI_INTR_STAT_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) -#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) -#define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) +#define _MIPIA_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb094) +#define _MIPIC_INTR_EN_REG_1 (_MIPI_MMIO_BASE(dev_priv) + 0xb894) #define MIPI_INTR_EN_REG_1(port) _MMIO_MIPI(port, _MIPIA_INTR_EN_REG_1, _MIPIC_INTR_EN_REG_1) #define RX_CONTENTION_DETECTED (1 << 0) /* XXX: only pipe A ?!? 
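Every conversion in this register header follows the same pattern: the MMIO address is no longer hardcoded against one base, it is computed from a per-device base chosen once at init, which is what lets VLV and BXT share a single set of macros. A minimal sketch of the idiom (the base addresses and register names below are illustrative):

#include <stdio.h>

/* Hypothetical per-platform bases, standing in for VLV_MIPI_BASE / BXT_MIPI_BASE. */
#define PLAT_VLV_BASE 0x180000u
#define PLAT_BXT_BASE 0x060000u

struct device {
    unsigned int mipi_mmio_base; /* chosen once at init, like display.dsi.mmio_base */
};

/* Register addresses are computed from the per-device base, not hardcoded. */
#define MIPI_MMIO_BASE(dev)     ((dev)->mipi_mmio_base)
#define MIPI_DEVICE_READY(dev)  (MIPI_MMIO_BASE(dev) + 0xb000u)
#define MIPI_INTR_STAT(dev)     (MIPI_MMIO_BASE(dev) + 0xb004u)

int main(void)
{
    struct device vlv = { .mipi_mmio_base = PLAT_VLV_BASE };
    struct device bxt = { .mipi_mmio_base = PLAT_BXT_BASE };

    /* One macro, two platforms, two absolute addresses. */
    printf("DEVICE_READY: vlv=0x%x bxt=0x%x\n",
           MIPI_DEVICE_READY(&vlv), MIPI_DEVICE_READY(&bxt));
    printf("INTR_STAT:    vlv=0x%x bxt=0x%x\n",
           MIPI_INTR_STAT(&vlv), MIPI_INTR_STAT(&bxt));
    return 0;
}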
*/ -#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100) +#define MIPIA_DBI_TYPEC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb100) #define DBI_TYPEC_ENABLE (1 << 31) #define DBI_TYPEC_WIP (1 << 30) #define DBI_TYPEC_OPTION_SHIFT 28 @@ -407,8 +409,8 @@ /* MIPI adapter registers */ -#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) -#define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) +#define _MIPIA_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb104) +#define _MIPIC_CTRL (_MIPI_MMIO_BASE(dev_priv) + 0xb904) #define MIPI_CTRL(port) _MMIO_MIPI(port, _MIPIA_CTRL, _MIPIC_CTRL) #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) @@ -440,21 +442,21 @@ #define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */ #define GLK_MIPIIO_ENABLE (1 << 0) -#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) -#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) +#define _MIPIA_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb108) +#define _MIPIC_DATA_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb908) #define MIPI_DATA_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_DATA_ADDRESS, _MIPIC_DATA_ADDRESS) #define DATA_MEM_ADDRESS_SHIFT 5 #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) #define DATA_VALID (1 << 0) -#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) -#define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) +#define _MIPIA_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb10c) +#define _MIPIC_DATA_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb90c) #define MIPI_DATA_LENGTH(port) _MMIO_MIPI(port, _MIPIA_DATA_LENGTH, _MIPIC_DATA_LENGTH) #define DATA_LENGTH_SHIFT 0 #define DATA_LENGTH_MASK (0xfffff << 0) -#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) -#define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) +#define _MIPIA_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb110) +#define _MIPIC_COMMAND_ADDRESS (_MIPI_MMIO_BASE(dev_priv) + 0xb910) #define MIPI_COMMAND_ADDRESS(port) _MMIO_MIPI(port, _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) #define COMMAND_MEM_ADDRESS_SHIFT 5 #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) @@ -462,18 +464,18 @@ #define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) #define COMMAND_VALID (1 << 0) -#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) -#define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) +#define _MIPIA_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb114) +#define _MIPIC_COMMAND_LENGTH (_MIPI_MMIO_BASE(dev_priv) + 0xb914) #define MIPI_COMMAND_LENGTH(port) _MMIO_MIPI(port, _MIPIA_COMMAND_LENGTH, _MIPIC_COMMAND_LENGTH) #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) -#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) -#define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) +#define _MIPIA_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb118) +#define _MIPIC_READ_DATA_RETURN0 (_MIPI_MMIO_BASE(dev_priv) + 0xb918) #define MIPI_READ_DATA_RETURN(port, n) _MMIO(_MIPI(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ -#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) -#define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) +#define _MIPIA_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb138) +#define _MIPIC_READ_DATA_VALID (_MIPI_MMIO_BASE(dev_priv) + 0xb938) #define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) #define READ_DATA_VALID(n) (1 << (n)) diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 1b88ea13435c..5a7a14e85c3f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -12,8 +12,6 @@ struct drm_i915_private; struct drm_i915_gem_object; struct intel_memory_region; -extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; - void __iomem * i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, unsigned long n, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 3218981488cc..73d9eda1d6b7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -413,7 +413,7 @@ retry: vma->mmo = mmo; if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); if (write) { @@ -550,6 +550,20 @@ out: intel_runtime_pm_put(&i915->runtime_pm, wakeref); } +void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_device *bdev = bo->bdev; + + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); + + if (obj->userfault_count) { + /* rpm wakeref provide exclusive access */ + list_del(&obj->userfault_link); + obj->userfault_count = 0; + } +} + void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) { struct i915_mmap_offset *mmo, *mn; @@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) spin_lock(&obj->mmo.lock); } spin_unlock(&obj->mmo.lock); + + if (obj->userfault_count) { + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + list_del(&obj->userfault_link); + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + obj->userfault_count = 0; + } } static struct i915_mmap_offset * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h index efee9e0d2508..1fa91b3033b3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); +void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 85482a04d158..7ff9c7877bec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) { /* Skip serialisation and waking the device if known to be not used. 
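The release path above drops the object from the new lmem userfault list under lmem_userfault_lock and zeroes userfault_count; the fault handler further down adds it under the same lock. Roughly this bookkeeping, as a standalone sketch with a pthread mutex and a hand-rolled doubly linked list standing in for the kernel's mutex and list_head (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive list of objects that currently have live mmap faults. */
struct obj {
    int userfault_count;
    struct obj *prev, *next;
};

static struct obj *userfault_list;
static pthread_mutex_t userfault_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fault path: link the object in on its first fault only. */
static void track_first_fault(struct obj *o)
{
    pthread_mutex_lock(&userfault_lock);
    if (!o->userfault_count++) {
        o->next = userfault_list;
        o->prev = NULL;
        if (userfault_list)
            userfault_list->prev = o;
        userfault_list = o;
    }
    pthread_mutex_unlock(&userfault_lock);
}

/* Release path: unlink under the lock and clear the count. */
static void release_mmap(struct obj *o)
{
    pthread_mutex_lock(&userfault_lock);
    if (o->userfault_count) {
        if (o->prev)
            o->prev->next = o->next;
        else
            userfault_list = o->next;
        if (o->next)
            o->next->prev = o->prev;
        o->userfault_count = 0;
    }
    pthread_mutex_unlock(&userfault_lock);
}

int main(void)
{
    struct obj a = {0}, b = {0};

    track_first_fault(&a);
    track_first_fault(&b);
    release_mmap(&a);
    printf("head is b: %s\n", userfault_list == &b ? "yes" : "no");
    printf("a tracked: %s\n", a.userfault_count ? "yes" : "no");
    return 0;
}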
*/ - if (obj->userfault_count) + if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev))) i915_gem_object_release_mmap_gtt(obj); if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 9f6b14ec189a..40305e2bcd49 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -298,7 +298,8 @@ struct drm_i915_gem_object { }; /** - * Whether the object is currently in the GGTT mmap. + * Whether the object is currently in the GGTT or any other supported + * fake offset mmap backed by lmem. */ unsigned int userfault_count; struct list_head userfault_link; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8357dbdcab5c..4df50b049cea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -20,7 +20,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, unsigned int sg_page_sizes) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; bool shrinkable; int i; @@ -66,7 +66,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, shrinkable = i915_gem_object_is_shrinkable(obj); if (i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_set_tiling_quirk(obj); GEM_BUG_ON(!list_empty(&obj->mm.link)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 00359ec9d58b..3428f735e786 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("%s\n", dev_name(i915->drm.dev)); - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0); + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0); flush_workqueue(i915->wq); /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 166d0a4b9e8c..acc561c0f0aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -18,10 +18,12 @@ #include "gt/intel_region_lmem.h" #include "i915_drv.h" #include "i915_gem_stolen.h" +#include "i915_pci.h" #include "i915_reg.h" #include "i915_utils.h" #include "i915_vgpu.h" #include "intel_mchbar_regs.h" +#include "intel_pci_config.h" /* * The BIOS typically reserves some of the system's memory for the exclusive @@ -428,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) reserved_base = stolen_top; reserved_size = 0; - switch (GRAPHICS_VER(i915)) { - case 2: - case 3: - break; - case 4: - if (!IS_G4X(i915)) - break; - fallthrough; - case 5: - g4x_get_stolen_reserved(i915, uncore, + if (GRAPHICS_VER(i915) >= 11) { + icl_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); - break; - case 6: - gen6_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - break; - case 7: - if (IS_VALLEYVIEW(i915)) - vlv_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - else - gen7_get_stolen_reserved(i915, uncore, - &reserved_base, &reserved_size); - break; - case 8: - case 9: + } else if (GRAPHICS_VER(i915) >= 8) { if (IS_LP(i915)) 
chv_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); else bdw_get_stolen_reserved(i915, uncore, &reserved_base, &reserved_size); - break; - default: - MISSING_CASE(GRAPHICS_VER(i915)); - fallthrough; - case 11: - case 12: - icl_get_stolen_reserved(i915, uncore, - &reserved_base, - &reserved_size); - break; + } else if (GRAPHICS_VER(i915) >= 7) { + if (IS_VALLEYVIEW(i915)) + vlv_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + else + gen7_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + } else if (GRAPHICS_VER(i915) >= 6) { + gen6_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); + } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { + g4x_get_stolen_reserved(i915, uncore, + &reserved_base, &reserved_size); } /* @@ -827,10 +810,13 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, if (WARN_ON_ONCE(instance)) return ERR_PTR(-ENODEV); + if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) + return ERR_PTR(-ENXIO); + /* Use DSM base address instead for stolen memory */ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE); if (IS_DG1(uncore->i915)) { - lmem_size = pci_resource_len(pdev, 2); + lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR); if (WARN_ON(lmem_size < dsm_base)) return ERR_PTR(-ENODEV); } else { @@ -842,11 +828,11 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, } dsm_size = lmem_size - dsm_base; - if (pci_resource_len(pdev, 2) < lmem_size) { + if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { io_start = 0; io_size = 0; } else { - io_start = pci_resource_start(pdev, 2) + dsm_base; + io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; io_size = dsm_size; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 85518b28cd72..fd42b89b7162 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, */ if (i915_gem_object_has_pages(obj) && obj->mm.madv == I915_MADV_WILLNEED && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { if (tiling == I915_TILING_NONE) { GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_clear_tiling_quirk(obj); @@ -458,7 +458,7 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; else args->phys_swizzle_mode = args->swizzle_mode; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index f64a3deb12fc..e3fc38dd5db0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -509,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags) static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + intel_wakeref_t wakeref = 0; + + if (bo->resource && likely(obj)) { + /* ttm_bo_release() already has dma_resv_lock */ + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); - if (likely(obj)) { __i915_gem_object_pages_fini(obj); + + if (wakeref) + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, 
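Note: several hunks here replace the magic BAR index 2 with GEN12_LMEM_BAR and gate setup on i915_pci_resource_valid(), returning -ENXIO when the BAR is unusable. A minimal stand-in for that check — an unassigned BAR reports zero length. The struct and helper below are illustrative, not the kernel's PCI API:

#include <stdbool.h>
#include <stdio.h>

#define GEN12_LMEM_BAR	2	/* a name instead of a bare "2" */

struct pci_bar { unsigned long start, len; };

static bool bar_valid(const struct pci_bar *bar)
{
	/* An unassigned BAR shows up with zero length. */
	return bar->len != 0;
}

int main(void)
{
	struct pci_bar lmem = { .start = 0, .len = 0 };

	if (!bar_valid(&lmem)) {
		fprintf(stderr, "LMEM BAR not assigned: -ENXIO\n");
		return 1;
	}
	return 0;
}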
wakeref); + i915_ttm_free_cached_io_rsgt(obj); } } @@ -981,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) struct ttm_buffer_object *bo = area->vm_private_data; struct drm_device *dev = bo->base.dev; struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref = 0; vm_fault_t ret; int idx; @@ -1002,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + if (!i915_ttm_resource_mappable(bo->resource)) { int err = -ENODEV; int i; @@ -1023,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) if (err) { drm_dbg(dev, "Unable to make resource CPU accessible\n"); dma_resv_unlock(bo->base.resv); - return VM_FAULT_SIGBUS; + ret = VM_FAULT_SIGBUS; + goto out_rpm; } } @@ -1034,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) } else { ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; + goto out_rpm; + + /* ttm_bo_vm_reserve() already has dma_resv_lock */ + if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { + obj->userfault_count = 1; + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list); + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); + } + + if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref, + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); i915_ttm_adjust_lru(obj); dma_resv_unlock(bo->base.resv); + +out_rpm: + if (wakeref) + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); + return ret; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 72ce2c9f42fd..c570cf780079 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -358,7 +358,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) static int igt_check_page_sizes(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - unsigned int supported = INTEL_INFO(i915)->page_sizes; + unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj = vma->obj; int err; @@ -419,7 +419,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; + unsigned int saved_mask = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; struct i915_vma *vma; int i, j, single; @@ -438,7 +438,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) combination |= page_sizes[j]; } - mkwrite_device_info(i915)->page_sizes = combination; + RUNTIME_INFO(i915)->page_sizes = combination; for (single = 0; single <= 1; ++single) { obj = fake_huge_pages_object(i915, combination, !!single); @@ -485,7 +485,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) out_put: i915_gem_object_put(obj); out_device: - mkwrite_device_info(i915)->page_sizes = saved_mask; + RUNTIME_INFO(i915)->page_sizes = saved_mask; return err; } @@ -495,7 +495,7 @@ static int igt_mock_memory_region_huge_pages(void *arg) const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS }; struct i915_ppgtt *ppgtt = 
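Note: the vm_fault_ttm() hunk above threads a conditionally acquired runtime-PM wakeref through a single out_rpm exit so every failure path drops it. A compilable toy of that unwind shape, with stand-ins for intel_runtime_pm_get/put:

#include <stdio.h>

static unsigned long rpm_get(void)
{
	puts("rpm get");
	return 1;
}

static void rpm_put(unsigned long wakeref)
{
	if (wakeref)
		puts("rpm put");
}

/* Mirrors the out_rpm pattern: the wakeref is taken only for iomem
 * mappings but released on every exit path through one label. */
static int toy_fault(int is_iomem, int fail_mid)
{
	unsigned long wakeref = 0;
	int ret = 0;

	if (is_iomem)
		wakeref = rpm_get();

	if (fail_mid) {
		ret = -1;	/* VM_FAULT_SIGBUS in the original */
		goto out_rpm;
	}

	/* ... fault servicing would run here ... */

out_rpm:
	rpm_put(wakeref);
	return ret;
}

int main(void)
{
	toy_fault(1, 1);
	return 0;
}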
arg; struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct intel_memory_region *mem; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -573,7 +573,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) { struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; + unsigned long supported = RUNTIME_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; int bit; int err; @@ -1390,7 +1390,7 @@ out_put: static int igt_ppgtt_sanity_check(void *arg) { struct drm_i915_private *i915 = arg; - unsigned int supported = INTEL_INFO(i915)->page_sizes; + unsigned int supported = RUNTIME_INFO(i915)->page_sizes; struct { igt_create_fn fn; unsigned int flags; @@ -1764,8 +1764,8 @@ int i915_gem_huge_page_mock_selftests(void) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; - mkwrite_device_info(dev_priv)->ppgtt_size = 48; + RUNTIME_INFO(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; + RUNTIME_INFO(dev_priv)->ppgtt_size = 48; ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0); if (IS_ERR(ppgtt)) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 3cfc621ef363..9a6a6b5b722b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -711,7 +711,7 @@ static bool bad_swizzling(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = to_gt(i915)->ggtt; - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return true; if (has_bit17_swizzle(ggtt->bit_6_swizzle_x) || diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 55bf23dc0e54..b73c91aa5450 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -367,7 +367,7 @@ static int igt_partial_tiling(void *arg) unsigned int pitch; struct tile tile; - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) /* * The swizzling pattern is actually unknown as it * varies based on physical address of each page. @@ -464,7 +464,7 @@ static int igt_smoke_tiling(void *arg) * Remember to look at the st_seed if we see a flip-flop in BAT! 
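Note: the selftest hunks switch page_sizes and the ppgtt fields from INTEL_INFO() to RUNTIME_INFO(), retiring mkwrite_device_info(): static capabilities stay const, while anything tests or fusing may patch lives in a mutable runtime copy. A minimal sketch of that split — the struct names are illustrative:

/* Stand-in for the const/runtime split the selftest hunks rely on. */
struct device_info { unsigned int page_sizes; };	/* const caps */
struct runtime_info { unsigned int page_sizes; };	/* may be patched */

struct toy_i915 {
	const struct device_info *info;	/* INTEL_INFO(): read-only */
	struct runtime_info rt;		/* RUNTIME_INFO(): writable */
};

static unsigned int supported_page_sizes(struct toy_i915 *i915)
{
	/* Selftests patch i915->rt.page_sizes; the const copy never moves. */
	return i915->rt.page_sizes;
}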
*/ - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return 0; obj = huge_gem_object(i915, diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 1bb766c79dcb..5aaacc53fa4c 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -247,6 +247,7 @@ err_scratch1: i915_gem_object_put(vm->scratch[1]); err_scratch0: i915_gem_object_put(vm->scratch[0]); + vm->scratch[0] = NULL; return ret; } @@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); - mutex_destroy(&ppgtt->flush); + if (ppgtt->base.pd) + free_pd(&ppgtt->base.vm, ppgtt->base.pd); - free_pd(&ppgtt->base.vm, ppgtt->base.pd); + mutex_destroy(&ppgtt->flush); } static void pd_vma_bind(struct i915_address_space *vm, @@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err = gen6_ppgtt_init_scratch(ppgtt); if (err) - goto err_free; + goto err_put; ppgtt->base.pd = gen6_alloc_top_pd(ppgtt); if (IS_ERR(ppgtt->base.pd)) { err = PTR_ERR(ppgtt->base.pd); - goto err_scratch; + goto err_put; } return &ppgtt->base; -err_scratch: - free_scratch(&ppgtt->base.vm); -err_free: - kfree(ppgtt); +err_put: + i915_vm_put(&ppgtt->base.vm); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 98645797962f..e49fa6fa6aee 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,10 +165,12 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } -u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg) +u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) { + u32 gsi_offset = gt->uncore->gsi_offset; + *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; - *cs++ = i915_mmio_reg_offset(inv_reg); + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; *cs++ = MI_NOOP; @@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) if (!HAS_FLAT_CCS(rq->engine->i915)) { /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_GFX_CCS_AUX_NV); } *cs++ = preparser_disable(false); @@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) if (aux_inv) { /* hsdes: 1809175790 */ if (rq->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_VD0_AUX_NV); else - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); + cs = gen12_emit_aux_table_inv(rq->engine->gt, + cs, GEN12_VE0_AUX_NV); } if (mode & EMIT_INVALIDATE) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index 32e3d2b831bb..e4d24c811dd6 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_gt; struct i915_request; int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode); @@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg); +u32 
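Note: the gen6_ppgtt_create() hunk collapses the separate err_scratch/err_free labels into a single err_put that releases the half-built VM, which in turn requires the cleanup path to tolerate NULL members — hence vm->scratch[0] = NULL on error and the ppgtt->base.pd guard. A toy version of that idempotent-teardown pattern:

#include <stdlib.h>

struct vm {
	void *scratch0;
	void *pd;
};

/* Cleanup must cope with partially constructed objects, so every
 * member release is NULL-safe (free(NULL) is a no-op, like the guards
 * added in the hunk above). */
static void vm_cleanup(struct vm *vm)
{
	free(vm->pd);
	free(vm->scratch0);
	free(vm);
}

static struct vm *vm_create(int fail_pd)
{
	struct vm *vm = calloc(1, sizeof(*vm));

	if (!vm)
		return NULL;

	vm->scratch0 = malloc(64);
	if (!vm->scratch0)
		goto err_put;

	if (!fail_pd)
		vm->pd = malloc(64);
	if (!vm->pd)
		goto err_put;

	return vm;

err_put:
	vm_cleanup(vm);		/* i915_vm_put() in the original */
	return NULL;
}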
*gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); static inline u32 * __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index c7bd5d71b03e..2128b7a72a25 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) if (intel_vgpu_active(vm->i915)) gen8_ppgtt_notify_vgt(ppgtt, false); - __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); + if (ppgtt->pd) + __gen8_ppgtt_cleanup(vm, ppgtt->pd, + gen8_pd_top_count(vm), vm->top); + free_scratch(vm); } @@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915_address_space *vm) struct drm_i915_gem_object *obj; obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); goto free_scratch; + } ret = map_pt_dma(vm, obj); if (ret) { @@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) free_scratch: while (i--) i915_gem_object_put(vm->scratch[i]); - return -ENOMEM; + vm->scratch[0] = NULL; + return ret; } static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) @@ -901,6 +907,7 @@ err_pd: struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags) { + struct i915_page_directory *pd; struct i915_ppgtt *ppgtt; int err; @@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, ppgtt->vm.alloc_scratch_dma = alloc_pt_dma; } - err = gen8_init_scratch(&ppgtt->vm); - if (err) - goto err_free; - - ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); - if (IS_ERR(ppgtt->pd)) { - err = PTR_ERR(ppgtt->pd); - goto err_free_scratch; - } - - if (!i915_vm_is_4lvl(&ppgtt->vm)) { - err = gen8_preallocate_top_level_pdp(ppgtt); - if (err) - goto err_free_pd; - } + ppgtt->vm.pte_encode = gen8_pte_encode; ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; ppgtt->vm.insert_entries = gen8_ppgtt_insert; @@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; ppgtt->vm.clear_range = gen8_ppgtt_clear; ppgtt->vm.foreach = gen8_ppgtt_foreach; + ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - ppgtt->vm.pte_encode = gen8_pte_encode; + err = gen8_init_scratch(&ppgtt->vm); + if (err) + goto err_put; + + pd = gen8_alloc_top_pd(&ppgtt->vm); + if (IS_ERR(pd)) { + err = PTR_ERR(pd); + goto err_put; + } + ppgtt->pd = pd; + + if (!i915_vm_is_4lvl(&ppgtt->vm)) { + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) + goto err_put; + } if (intel_vgpu_active(gt->i915)) gen8_ppgtt_notify_vgt(ppgtt, true); - ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - return ppgtt; -err_free_pd: - __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, - gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); -err_free_scratch: - free_scratch(&ppgtt->vm); -err_free: - kfree(ppgtt); +err_put: + i915_vm_put(&ppgtt->vm); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 17e7f20bbb48..1f7188129cd1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt, */ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0) return false; - else if (GRAPHICS_VER(i915) == 12) + else if (MEDIA_VER(i915) >= 12) return (physical_vdbox % 2 == 0) || !(BIT(physical_vdbox - 1) & vdbox_mask); - 
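Note: gen12_emit_aux_table_inv() now takes the GT so the emitted LRI carries the uncore's gsi_offset — on a standalone media GT the register's true MMIO address is its regular offset plus that base. A sketch of the fixed emission; the opcode encodings are illustrative approximations, not authoritative:

#include <stdint.h>

#define MI_LOAD_REGISTER_IMM(n)	(0x22u << 23 | (2 * (n) - 1))
#define AUX_INV			1u

static uint32_t *emit_aux_inv(uint32_t *cs, uint32_t reg, uint32_t gsi_offset)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = reg + gsi_offset;	/* the offset the fix adds */
	*cs++ = AUX_INV;
	*cs++ = 0;			/* MI_NOOP */
	return cs;
}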
else if (GRAPHICS_VER(i915) == 11) + else if (MEDIA_VER(i915) == 11) return logical_vdbox % 2 == 0; - MISSING_CASE(GRAPHICS_VER(i915)); return false; } +static void engine_mask_apply_media_fuses(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + unsigned int logical_vdbox = 0; + unsigned int i; + u32 media_fuse, fuse1; + u16 vdbox_mask; + u16 vebox_mask; + + if (MEDIA_VER(gt->i915) < 11) + return; + + /* + * On newer platforms the fusing register is called 'enable' and has + * enable semantics, while on older platforms it is called 'disable' + * and bits have disable semantices. + */ + media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); + if (MEDIA_VER_FULL(i915) < IP_VER(12, 50)) + media_fuse = ~media_fuse; + + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; + + if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) { + fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); + gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); + } else { + gt->info.sfc_mask = ~0; + } + + for (i = 0; i < I915_MAX_VCS; i++) { + if (!HAS_ENGINE(gt, _VCS(i))) { + vdbox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vdbox_mask)) { + gt->info.engine_mask &= ~BIT(_VCS(i)); + drm_dbg(&i915->drm, "vcs%u fused off\n", i); + continue; + } + + if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) + gt->info.vdbox_sfc_access |= BIT(i); + logical_vdbox++; + } + drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(gt)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); + + for (i = 0; i < I915_MAX_VECS; i++) { + if (!HAS_ENGINE(gt, _VECS(i))) { + vebox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vebox_mask)) { + gt->info.engine_mask &= ~BIT(_VECS(i)); + drm_dbg(&i915->drm, "vecs%u fused off\n", i); + } + } + drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(gt)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); +} + static void engine_mask_apply_compute_fuses(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -672,6 +739,9 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt) unsigned long ccs_mask; unsigned int i; + if (GRAPHICS_VER(i915) < 11) + return; + if (hweight32(CCS_MASK(gt)) <= 1) return; @@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt) unsigned long meml3_mask; unsigned long quad; + if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) && + GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))) + return; + meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3); meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask); @@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt) */ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) { - struct drm_i915_private *i915 = gt->i915; struct intel_gt_info *info = >->info; - struct intel_uncore *uncore = gt->uncore; - unsigned int logical_vdbox = 0; - unsigned int i; - u32 media_fuse, fuse1; - u16 vdbox_mask; - u16 vebox_mask; - - info->engine_mask = INTEL_INFO(i915)->platform_engine_mask; - - if (GRAPHICS_VER(i915) < 11) - return info->engine_mask; - /* - * On newer platforms the fusing register is called 'enable' and has - * enable semantics, while on older platforms it is called 'disable' - * and bits have disable semantices. 
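Note: the new engine_mask_apply_media_fuses() keeps the old normalisation: pre-12.50 media parts expose a fuse register with disable semantics, so it is inverted once up front and all downstream code works with a single enable mask. The same normalisation in isolation:

#include <stdint.h>

static uint16_t vdbox_enable_mask(uint32_t fuse, int enable_semantics,
				  uint16_t field_mask)
{
	/* Older parts report *disabled* engines; flip once so callers
	 * always see "bit set == engine present". */
	if (!enable_semantics)
		fuse = ~fuse;
	return fuse & field_mask;
}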
- */ - media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); - if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) - media_fuse = ~media_fuse; - - vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; - - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { - fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1); - gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); - } else { - gt->info.sfc_mask = ~0; - } - - for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(gt, _VCS(i))) { - vdbox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vdbox_mask)) { - info->engine_mask &= ~BIT(_VCS(i)); - drm_dbg(&i915->drm, "vcs%u fused off\n", i); - continue; - } - - if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) - gt->info.vdbox_sfc_access |= BIT(i); - logical_vdbox++; - } - drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(gt)); - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); - - for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(gt, _VECS(i))) { - vebox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vebox_mask)) { - info->engine_mask &= ~BIT(_VECS(i)); - drm_dbg(&i915->drm, "vecs%u fused off\n", i); - } - } - drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(gt)); - GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); + GEM_BUG_ON(!info->engine_mask); + engine_mask_apply_media_fuses(gt); engine_mask_apply_compute_fuses(gt); engine_mask_apply_copy_fuses(gt); @@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine) return false; /* Caller disables interrupts */ - spin_lock(&engine->gt->irq_lock); + spin_lock(engine->gt->irq_lock); engine->irq_enable(engine); - spin_unlock(&engine->gt->irq_lock); + spin_unlock(engine->gt->irq_lock); return true; } @@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine) return; /* Caller disables interrupts */ - spin_lock(&engine->gt->irq_lock); + spin_lock(engine->gt->irq_lock); engine->irq_disable(engine); - spin_unlock(&engine->gt->irq_lock); + spin_unlock(engine->gt->irq_lock); } void intel_engines_reset_default_submission(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h index 889f0df3940b..fe1a0d5fd4b1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -110,6 +110,7 @@ #define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ #define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ #define RING_BBADDR(base) _MMIO((base) + 0x140) +#define RING_BB_OFFSET(base) _MMIO((base) + 0x158) #define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ #define CCID(base) _MMIO((base) + 0x180) #define CCID_EN BIT(0) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 15a915bb4088..30cf5c3369d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -16,7 +16,9 @@ #include "intel_ggtt_gmch.h" #include "intel_gt.h" #include "intel_gt_regs.h" +#include "intel_pci_config.h" #include "i915_drv.h" +#include "i915_pci.h" #include "i915_scatterlist.h" #include "i915_utils.h" #include "i915_vgpu.h" @@ -869,8 +871,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) u32 pte_flags; int ret; - GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915)); - phys_addr = pci_resource_start(pdev, 0) + 
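Note: from here on gt->irq_lock is a pointer rather than an embedded spinlock, so a standalone media GT can alias the primary GT's lock — hence the spin_lock(&gt->irq_lock) to spin_lock(gt->irq_lock) conversions throughout. A userspace model of the aliasing, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

struct toy_gt {
	pthread_mutex_t *irq_lock;	/* was an embedded lock */
};

static void toy_engine_irq_enable(struct toy_gt *gt)
{
	pthread_mutex_lock(gt->irq_lock);	/* spin_lock(gt->irq_lock) */
	/* ... program the IER register ... */
	pthread_mutex_unlock(gt->irq_lock);
}

/* Two GTs, one lock: media.irq_lock = primary.irq_lock, and every
 * interrupt path serialises on the same object. */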
gen6_gttadr_offset(i915); + GEM_WARN_ON(pci_resource_len(pdev, GTTMMADR_BAR) != gen6_gttmmadr_size(i915)); + phys_addr = pci_resource_start(pdev, GTTMMADR_BAR) + gen6_gttadr_offset(i915); /* * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range @@ -930,7 +932,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) u16 snb_gmch_ctl; if (!HAS_LMEM(i915)) { - ggtt->gmadr = pci_resource(pdev, 2); + if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + return -ENXIO; + + ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); } @@ -1084,7 +1089,10 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) unsigned int size; u16 snb_gmch_ctl; - ggtt->gmadr = pci_resource(pdev, 2); + if (!i915_pci_resource_valid(pdev, GTT_APERTURE_BAR)) + return -ENXIO; + + ggtt->gmadr = pci_resource(pdev, GTT_APERTURE_BAR); ggtt->mappable_end = resource_size(&ggtt->gmadr); /* diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 6ebda3d65086..ea775e601686 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -727,7 +727,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt) * bit17 dependent, and so we need to also prevent the pages * from being moved. */ - i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + i915->gem_quirks |= GEM_QUIRK_PIN_SWIZZLED_PAGES; swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } @@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt) INIT_LIST_HEAD(&ggtt->fence_list); INIT_LIST_HEAD(&ggtt->userfault_list); - intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm); detect_bit_6_swizzle(ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 0e494028b81d..7af6db3194dd 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -7,6 +7,7 @@ #include <linux/mei_aux.h> #include "i915_drv.h" #include "i915_reg.h" +#include "gem/i915_gem_region.h" #include "gt/intel_gsc.h" #include "gt/intel_gt.h" @@ -36,10 +37,56 @@ static int gsc_irq_init(int irq) return irq_set_chip_data(irq, NULL); } +static int +gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size) +{ + struct intel_gt *gt = gsc_to_gt(gsc); + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_lmem(gt->i915, size, + I915_BO_ALLOC_CONTIGUOUS | + I915_BO_ALLOC_CPU_CLEAR); + if (IS_ERR(obj)) { + drm_err(>->i915->drm, "Failed to allocate gsc memory\n"); + return PTR_ERR(obj); + } + + err = i915_gem_object_pin_pages_unlocked(obj); + if (err) { + drm_err(>->i915->drm, "Failed to pin pages for gsc memory\n"); + goto out_put; + } + + intf->gem_obj = obj; + + return 0; + +out_put: + i915_gem_object_put(obj); + return err; +} + +static void gsc_ext_om_destroy(struct intel_gsc_intf *intf) +{ + struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj); + + if (!obj) + return; + + if (i915_gem_object_has_pinned_pages(obj)) + i915_gem_object_unpin_pages(obj); + + i915_gem_object_put(obj); +} + struct gsc_def { const char *name; unsigned long bar; size_t bar_size; + bool use_polling; + bool slow_firmware; + size_t lmem_size; }; /* gsc resources and definitions (HECI1 and HECI2) */ @@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = { } }; +static const struct gsc_def gsc_def_xehpsdv[] = { + { + /* HECI1 not enabled on the device. 
*/ + }, + { + .name = "mei-gscfi", + .bar = DG1_GSC_HECI2_BASE, + .bar_size = GSC_BAR_LENGTH, + .use_polling = true, + .slow_firmware = true, + } +}; + static const struct gsc_def gsc_def_dg2[] = { { .name = "mei-gsc", .bar = DG2_GSC_HECI1_BASE, .bar_size = GSC_BAR_LENGTH, + .lmem_size = SZ_4M, }, { .name = "mei-gscfi", @@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev) kfree(adev); } -static void gsc_destroy_one(struct intel_gsc_intf *intf) +static void gsc_destroy_one(struct drm_i915_private *i915, + struct intel_gsc *gsc, unsigned int intf_id) { + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; + if (intf->adev) { auxiliary_device_delete(&intf->adev->aux_dev); auxiliary_device_uninit(&intf->adev->aux_dev); intf->adev = NULL; } + if (intf->irq >= 0) irq_free_desc(intf->irq); intf->irq = -1; + + gsc_ext_om_destroy(intf); } -static void gsc_init_one(struct drm_i915_private *i915, - struct intel_gsc_intf *intf, +static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc, unsigned int intf_id) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct mei_aux_device *adev; struct auxiliary_device *aux_dev; const struct gsc_def *def; + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; int ret; intf->irq = -1; @@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915, if (IS_DG1(i915)) { def = &gsc_def_dg1[intf_id]; + } else if (IS_XEHPSDV(i915)) { + def = &gsc_def_xehpsdv[intf_id]; } else if (IS_DG2(i915)) { def = &gsc_def_dg2[intf_id]; } else { @@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915, return; } + /* skip irq initialization */ + if (def->use_polling) + goto add_device; + intf->irq = irq_alloc_desc(0); if (intf->irq < 0) { drm_err(&i915->drm, "gsc irq error %d\n", intf->irq); - return; + goto fail; } ret = gsc_irq_init(intf->irq); @@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915, goto fail; } +add_device: adev = kzalloc(sizeof(*adev), GFP_KERNEL); if (!adev) goto fail; + if (def->lmem_size) { + drm_dbg(&i915->drm, "setting up GSC lmem\n"); + + if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) { + drm_err(&i915->drm, "setting up gsc extended operational memory failed\n"); + kfree(adev); + goto fail; + } + + adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0); + adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size; + } + adev->irq = intf->irq; adev->bar.parent = &pdev->resource[0]; adev->bar.start = def->bar + pdev->resource[0].start; adev->bar.end = adev->bar.start + def->bar_size - 1; adev->bar.flags = IORESOURCE_MEM; adev->bar.desc = IORES_DESC_NONE; + adev->slow_firmware = def->slow_firmware; aux_dev = &adev->aux_dev; aux_dev->name = def->name; @@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915, return; fail: - gsc_destroy_one(intf); + gsc_destroy_one(i915, gsc, intf->id); } static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) @@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) return; } - if (gt->gsc.intf[intf_id].irq < 0) { - drm_err_ratelimited(>->i915->drm, "GSC irq: irq not set"); + if (gt->gsc.intf[intf_id].irq < 0) return; - } ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); if (ret) @@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915) return; for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) - gsc_init_one(i915, &gsc->intf[i], i); + gsc_init_one(i915, gsc, i); } void intel_gsc_fini(struct intel_gsc *gsc) @@ 
-220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc) return; for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) - gsc_destroy_one(&gsc->intf[i]); + gsc_destroy_one(gt->i915, gsc, i); } diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h index 68582f912b21..fcac1775e9c3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.h +++ b/drivers/gpu/drm/i915/gt/intel_gsc.h @@ -20,11 +20,14 @@ struct mei_aux_device; /** * struct intel_gsc - graphics security controller + * + * @gem_obj: scratch memory GSC operations * @intf : gsc interface */ struct intel_gsc { struct intel_gsc_intf { struct mei_aux_device *adev; + struct drm_i915_gem_object *gem_obj; int irq; unsigned int id; } intf[INTEL_GSC_NUM_INTERFACES]; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f435e06125aa..d0b03a928b9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -26,18 +26,22 @@ #include "intel_gt_requests.h" #include "intel_migrate.h" #include "intel_mocs.h" +#include "intel_pci_config.h" #include "intel_pm.h" #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" +#include "intel_sa_media.h" #include "intel_gt_sysfs.h" #include "intel_uncore.h" #include "shmem_utils.h" -static void __intel_gt_init_early(struct intel_gt *gt) +void intel_gt_common_init_early(struct intel_gt *gt) { - spin_lock_init(>->irq_lock); + spin_lock_init(gt->irq_lock); + INIT_LIST_HEAD(>->lmem_userfault_list); + mutex_init(>->lmem_userfault_lock); INIT_LIST_HEAD(>->closed_vma); spin_lock_init(>->closed_lock); @@ -57,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt) } /* Preliminary initialization of Tile 0 */ -void intel_root_gt_init_early(struct drm_i915_private *i915) +int intel_root_gt_init_early(struct drm_i915_private *i915) { struct intel_gt *gt = to_gt(i915); gt->i915 = i915; gt->uncore = &i915->uncore; + gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL); + if (!gt->irq_lock) + return -ENOMEM; - __intel_gt_init_early(gt); + intel_gt_common_init_early(gt); + + return 0; } static int intel_gt_probe_lmem(struct intel_gt *gt) @@ -780,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) int ret; if (!gt_is_root(gt)) { - struct intel_uncore_mmio_debug *mmio_debug; struct intel_uncore *uncore; + spinlock_t *irq_lock; - uncore = kzalloc(sizeof(*uncore), GFP_KERNEL); + uncore = drmm_kzalloc(>->i915->drm, sizeof(*uncore), GFP_KERNEL); if (!uncore) return -ENOMEM; - mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL); - if (!mmio_debug) { - kfree(uncore); + irq_lock = drmm_kzalloc(>->i915->drm, sizeof(*irq_lock), GFP_KERNEL); + if (!irq_lock) return -ENOMEM; - } gt->uncore = uncore; - gt->uncore->debug = mmio_debug; + gt->irq_lock = irq_lock; - __intel_gt_init_early(gt); + intel_gt_common_init_early(gt); } intel_uncore_init_early(gt->uncore, gt); + intel_wakeref_auto_init(>->userfault_wakeref, gt->uncore->rpm); ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); if (ret) @@ -810,27 +818,17 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr) return 0; } -static void -intel_gt_tile_cleanup(struct intel_gt *gt) -{ - intel_uncore_cleanup_mmio(gt->uncore); - - if (!gt_is_root(gt)) { - kfree(gt->uncore->debug); - kfree(gt->uncore); - kfree(gt); - } -} - int intel_gt_probe_all(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); struct intel_gt *gt = &i915->gt0; + const struct 
intel_gt_definition *gtdef; phys_addr_t phys_addr; unsigned int mmio_bar; + unsigned int i; int ret; - mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0; + mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR; phys_addr = pci_resource_start(pdev, mmio_bar); /* @@ -838,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915) * and it has been already initialized early during probe * in i915_driver_probe() */ + gt->i915 = i915; + gt->name = "Primary GT"; + gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; + + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); ret = intel_gt_tile_setup(gt, phys_addr); if (ret) return ret; i915->gt[0] = gt; - /* TODO: add more tiles */ + if (!HAS_EXTRA_GT_LIST(i915)) + return 0; + + for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]; + gtdef->name != NULL; + i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) { + gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL); + if (!gt) { + ret = -ENOMEM; + goto err; + } + + gt->i915 = i915; + gt->name = gtdef->name; + gt->type = gtdef->type; + gt->info.engine_mask = gtdef->engine_mask; + gt->info.id = i; + + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); + if (GEM_WARN_ON(range_overflows_t(resource_size_t, + gtdef->mapping_base, + SZ_16M, + pci_resource_len(pdev, mmio_bar)))) { + ret = -ENODEV; + goto err; + } + + switch (gtdef->type) { + case GT_TILE: + ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base); + break; + + case GT_MEDIA: + ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base, + gtdef->gsi_offset); + break; + + case GT_PRIMARY: + /* Primary GT should not appear in extra GT list */ + default: + MISSING_CASE(gtdef->type); + ret = -ENODEV; + } + + if (ret) + goto err; + + i915->gt[i] = gt; + } + return 0; + +err: + i915_probe_error(i915, "Failed to initialize %s! 
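Note: intel_gt_probe_all() now walks a sentinel-terminated extra_gt_list, creating one GT per entry at phys_addr + mapping_base (0x380000 for standalone media, per the MTL_MEDIA_GSI_BASE comment further down). The iteration shape in isolation — entry fields are reduced to the two used here:

#include <stdio.h>

struct gt_def {
	const char *name;
	unsigned int mapping_base;
};

static const struct gt_def extra_gts[] = {
	{ .name = "Standalone Media GT", .mapping_base = 0x380000 },
	{ 0 }	/* NULL-name sentinel terminates the walk */
};

int main(void)
{
	const struct gt_def *d;

	for (d = extra_gts; d->name; d++)
		printf("Setting up %s at +0x%x\n", d->name, d->mapping_base);
	return 0;
}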
(%d)\n", gtdef->name, ret); + intel_gt_release_all(i915); + + return ret; } int intel_gt_tiles_init(struct drm_i915_private *i915) @@ -868,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915) struct intel_gt *gt; unsigned int id; - for_each_gt(gt, i915, id) { - intel_gt_tile_cleanup(gt); + for_each_gt(gt, i915, id) i915->gt[id] = NULL; - } } void intel_gt_info_print(const struct intel_gt_info *info, diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 40b06adf509a..2ee582e287c8 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc) return container_of(gsc, struct intel_gt, gsc); } -void intel_root_gt_init_early(struct drm_i915_private *i915); +void intel_gt_common_init_early(struct intel_gt *gt); +int intel_root_gt_init_early(struct drm_i915_private *i915); int intel_gt_assign_ggtt(struct intel_gt *gt); int intel_gt_init_mmio(struct intel_gt *gt); int __must_check intel_gt_init_hw(struct intel_gt *gt); @@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt); void intel_gt_driver_unregister(struct intel_gt *gt); void intel_gt_driver_remove(struct intel_gt *gt); void intel_gt_driver_release(struct intel_gt *gt); - void intel_gt_driver_late_release_all(struct drm_i915_private *i915); int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index d5d1b04dbcad..3f656d3dba9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore) return base_freq + frac_freq; } -static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore, - u32 rpm_config_reg) -{ - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; - u32 crystal_clock = - (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - - switch (crystal_clock) { - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - return f19_2_mhz; - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - return f24_mhz; - default: - MISSING_CASE(crystal_clock); - return 0; - } -} - static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, u32 rpm_config_reg) { @@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, } } -static u32 read_clock_frequency(struct intel_uncore *uncore) +static u32 gen11_read_clock_frequency(struct intel_uncore *uncore) { - u32 f12_5_mhz = 12500000; - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + /* + * Note that on gen11+, the clock frequency may be reconfigured. + * We do not, and we assume nobody else does. + * + * First figure out the reference frequency. There are 2 ways + * we can compute the frequency, either through the + * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE + * tells us which one we should use. + */ + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); + + freq = gen11_get_crystal_clock_freq(uncore, c0); - if (GRAPHICS_VER(uncore->i915) <= 4) { - /* - * PRMs say: - * - * "The value in this register increments once every 16 - * hclks." 
(through the “Clocking Configuration” - * (“CLKCFG”) MCHBAR register) - */ - return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; - } else if (GRAPHICS_VER(uncore->i915) <= 8) { /* - * PRMs say: - * - * "The PCU TSC counts 10ns increments; this timestamp - * reflects bits 38:3 of the TSC (i.e. 80ns granularity, - * rolling over every 1.5 hours). + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycle). */ - return f12_5_mhz; - } else if (GRAPHICS_VER(uncore->i915) <= 9) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; - - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(uncore); - } else { - freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz; - - /* - * Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> - CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; - } else if (GRAPHICS_VER(uncore->i915) <= 12) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; + freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); + } + + return freq; +} + +static u32 gen9_read_clock_frequency(struct intel_uncore *uncore) +{ + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000; /* - * First figure out the reference frequency. There are 2 ways - * we can compute the frequency, either through the - * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE - * tells us which one we should use. + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycle). */ - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(uncore); - } else { - u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); - - if (GRAPHICS_VER(uncore->i915) >= 11) - freq = gen11_get_crystal_clock_freq(uncore, c0); - else - freq = gen9_get_crystal_clock_freq(uncore, c0); - - /* - * Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> - GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; + freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> + CTC_SHIFT_PARAMETER_SHIFT); } - MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); - return 0; + return freq; } -void intel_gt_init_clock_frequency(struct intel_gt *gt) +static u32 gen5_read_clock_frequency(struct intel_uncore *uncore) { /* - * Note that on gen11+, the clock frequency may be reconfigured. - * We do not, and we assume nobody else does. + * PRMs say: + * + * "The PCU TSC counts 10ns increments; this timestamp + * reflects bits 38:3 of the TSC (i.e. 80ns granularity, + * rolling over every 1.5 hours). + */ + return 12500000; +} + +static u32 gen2_read_clock_frequency(struct intel_uncore *uncore) +{ + /* + * PRMs say: + * + * "The value in this register increments once every 16 + * hclks." 
(through the “Clocking Configuration” + * (“CLKCFG”) MCHBAR register) */ + return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; +} + +static u32 read_clock_frequency(struct intel_uncore *uncore) +{ + if (GRAPHICS_VER(uncore->i915) >= 11) + return gen11_read_clock_frequency(uncore); + else if (GRAPHICS_VER(uncore->i915) >= 9) + return gen9_read_clock_frequency(uncore); + else if (GRAPHICS_VER(uncore->i915) >= 5) + return gen5_read_clock_frequency(uncore); + else + return gen2_read_clock_frequency(uncore); +} + +void intel_gt_init_clock_frequency(struct intel_gt *gt) +{ gt->clock_frequency = read_clock_frequency(gt->uncore); - if (gt->clock_frequency) - gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */ if (GRAPHICS_VER(gt->i915) == 11) gt->clock_period_ns = NSEC_PER_SEC / 13750000; + else if (gt->clock_frequency) + gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); GT_TRACE(gt, "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 3a72d4fd0214..f26882fdc24c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt, u32 timeout_ts; u32 ident; - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); @@ -59,11 +59,17 @@ static void gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, const u16 iir) { + struct intel_gt *media_gt = gt->i915->media_gt; + if (instance == OTHER_GUC_INSTANCE) return guc_irq_handler(>->uc.guc, iir); + if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt) + return guc_irq_handler(&media_gt->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(>->rps, iir); + if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt) + return gen11_rps_irq_handler(&media_gt->rps, iir); if (instance == OTHER_KCR_INSTANCE) return intel_pxp_irq_handler(>->pxp, iir); @@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, { struct intel_engine_cs *engine; + /* + * Platforms with standalone media have their media engines in another + * GT. 
+ */ + if (MEDIA_VER(gt->i915) >= 13 && + (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) { + if (!gt->i915->media_gt) + goto err; + + gt = gt->i915->media_gt; + } + if (instance <= MAX_ENGINE_INSTANCE) engine = gt->engine_class[class][instance]; else @@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, if (likely(engine)) return intel_engine_cs_irq(engine, iir); +err: WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", class, instance); } @@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) unsigned long intr_dw; unsigned int bit; - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); @@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) { unsigned int bank; - spin_lock(>->irq_lock); + spin_lock(gt->irq_lock); for (bank = 0; bank < 2; bank++) { if (master_ctl & GEN11_GT_DW_IRQ(bank)) gen11_gt_bank_handler(gt, bank); } - spin_unlock(>->irq_lock); + spin_unlock(gt->irq_lock); } bool gen11_gt_reset_one_iir(struct intel_gt *gt, @@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt, void __iomem * const regs = gt->uncore->regs; u32 dw; - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); if (dw & BIT(bit)) { @@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) if (!HAS_L3_DPF(gt->i915)) return; - spin_lock(>->irq_lock); + spin_lock(gt->irq_lock); gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915)); - spin_unlock(>->irq_lock); + spin_unlock(gt->irq_lock); if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) gt->i915->l3_parity.which_slice |= 1 << 1; @@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt, u32 interrupt_mask, u32 enabled_irq_mask) { - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 40bdd4cb629f..108b9e76c32e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -504,8 +504,8 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) drm_puts(p, "no P-state info available\n"); } - drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk); - drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq); + drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); + drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); intel_runtime_pm_put(uncore->rpm, wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c index 11060f5a4c89..52f2a28b2058 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c @@ -37,7 +37,7 @@ static void gen6_gt_pm_update_irq(struct intel_gt *gt, WARN_ON(enabled_irq_mask & ~interrupt_mask); - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); new_val = gt->pm_imr; new_val &= ~interrupt_mask; @@ -64,7 +64,7 @@ void gen6_gt_pm_reset_iir(struct intel_gt *gt, u32 reset_mask) struct intel_uncore *uncore = gt->uncore; i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? 
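Note: the gen11_engine_irq_handler() hunk above reroutes video-class interrupts to the media GT on MEDIA_VER >= 13 parts before the engine lookup runs. A self-contained sketch of that routing decision, with types reduced to the fields involved:

enum { RENDER_CLASS, VIDEO_DECODE_CLASS, VIDEO_ENHANCEMENT_CLASS };

struct toy_gt {
	struct toy_gt *media_gt;	/* NULL when no standalone media */
};

static struct toy_gt *irq_target_gt(struct toy_gt *gt, int engine_class,
				    int media_ver)
{
	if (media_ver >= 13 &&
	    (engine_class == VIDEO_DECODE_CLASS ||
	     engine_class == VIDEO_ENHANCEMENT_CLASS))
		return gt->media_gt;	/* NULL -> report unhandled IRQ */
	return gt;
}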
GEN8_GT_IIR(2) : GEN6_PMIIR; - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); intel_uncore_write(uncore, reg, reset_mask); intel_uncore_write(uncore, reg, reset_mask); @@ -92,7 +92,7 @@ static void write_pm_ier(struct intel_gt *gt) void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) { - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); gt->pm_ier |= enable_mask; write_pm_ier(gt); @@ -101,7 +101,7 @@ void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask) { - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); gt->pm_ier &= ~disable_mask; gen6_gt_pm_mask_irq(gt, disable_mask); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index d414785003cc..2275ee47da95 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -1554,6 +1554,8 @@ #define OTHER_GTPM_INSTANCE 1 #define OTHER_KCR_INSTANCE 4 #define OTHER_GSC_INSTANCE 6 +#define OTHER_MEDIA_GUC_INSTANCE 16 +#define OTHER_MEDIA_GTPM_INSTANCE 17 #define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) @@ -1578,4 +1580,12 @@ #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) +/* + * Standalone Media's non-engine GT registers are located at their regular GT + * offsets plus 0x380000. This extra offset is stored inside the intel_uncore + * structure so that the existing code can be used for both GTs without + * modification. + */ +#define MTL_MEDIA_GSI_BASE 0x380000 + #endif /* __INTEL_GT_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 4d56f7d5a3be..f19c2de77ff6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -81,8 +81,17 @@ struct gt_defaults { u32 max_freq; }; +enum intel_gt_type { + GT_PRIMARY, + GT_TILE, + GT_MEDIA, +}; + struct intel_gt { struct drm_i915_private *i915; + const char *name; + enum intel_gt_type type; + struct intel_uncore *uncore; struct i915_ggtt *ggtt; @@ -132,6 +141,20 @@ struct intel_gt { struct intel_wakeref wakeref; atomic_t user_wakeref; + /** + * Protects access to lmem usefault list. + * It is required, if we are outside of the runtime suspend path, + * access to @lmem_userfault_list requires always first grabbing the + * runtime pm, to ensure we can't race against runtime suspend. + * Once we have that we also need to grab @lmem_userfault_lock, + * at which point we have exclusive access. + * The runtime suspend path is special since it doesn't really hold any locks, + * but instead has exclusive access by virtue of all other accesses requiring + * holding the runtime pm wakeref. 
+ */ + struct mutex lmem_userfault_lock; + struct list_head lmem_userfault_list; + struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ @@ -147,6 +170,9 @@ struct intel_gt { */ intel_wakeref_t awake; + /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */ + struct intel_wakeref_auto userfault_wakeref; + u32 clock_frequency; u32 clock_period_ns; @@ -154,7 +180,7 @@ struct intel_gt { struct intel_rc6 rc6; struct intel_rps rps; - spinlock_t irq_lock; + spinlock_t *irq_lock; u32 gt_imr; u32 pm_ier; u32 pm_imr; @@ -262,6 +288,14 @@ struct intel_gt { struct kobject *sysfs_defaults; }; +struct intel_gt_definition { + enum intel_gt_type type; + char *name; + u32 mapping_base; + u32 gsi_offset; + intel_engine_mask_t engine_mask; +}; + enum intel_gt_scratch_field { /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index b67831833c9a..2eaeba14319e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -405,6 +405,9 @@ void free_scratch(struct i915_address_space *vm) { int i; + if (!vm->scratch[0]) + return; + for (i = 0; i <= vm->top; i++) i915_gem_object_put(vm->scratch[i]); } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index e639434e97fd..c0ca53cba9f0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -386,9 +386,6 @@ struct i915_ggtt { */ struct list_head userfault_list; - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ - struct intel_wakeref_auto userfault_wakeref; - struct mutex error_mutex; struct drm_mm_node error_capture; struct drm_mm_node uc_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 070cec4ff8a4..3955292483a6 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -662,6 +662,21 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) return -1; } +static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) +{ + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + return 0x80; + else if (GRAPHICS_VER(engine->i915) >= 12) + return 0x70; + else if (GRAPHICS_VER(engine->i915) >= 9) + return 0x64; + else if (GRAPHICS_VER(engine->i915) >= 8 && + engine->class == RENDER_CLASS) + return 0xc4; + else + return -1; +} + static int lrc_ring_gpr0(const struct intel_engine_cs *engine) { if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) @@ -768,6 +783,7 @@ static void init_common_regs(u32 * const regs, bool inhibit) { u32 ctl; + int loc; ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); @@ -779,6 +795,10 @@ static void init_common_regs(u32 * const regs, regs[CTX_CONTEXT_CONTROL] = ctl; regs[CTX_TIMESTAMP] = ce->stats.runtime.last; + + loc = lrc_ring_bb_offset(engine); + if (loc != -1) + regs[loc + 1] = 0; } static void init_wa_bb_regs(u32 * const regs, @@ -1278,7 +1298,8 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) /* hsdes: 1809175790 */ if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_GFX_CCS_AUX_NV); /* Wa_16014892111 */ if (IS_DG2(ce->engine->i915)) @@ -1304,9 +1325,11 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) /* hsdes: 1809175790 */ if (!HAS_FLAT_CCS(ce->engine->i915)) { if 
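Note: init_common_regs() now scrubs the ring's BB_OFFSET slot in the context image, located by the new per-version lrc_ring_bb_offset() ladder. A simplified locate-then-clear sketch — the offsets come from the hunk above, with the 12.50 step elided:

static int bb_offset_slot(int gfx_ver, int is_render)
{
	if (gfx_ver >= 12)
		return 0x70;
	else if (gfx_ver >= 9)
		return 0x64;
	else if (gfx_ver >= 8 && is_render)
		return 0xc4;
	return -1;	/* no BB_OFFSET in this context layout */
}

static void scrub_bb_offset(unsigned int *regs, int gfx_ver, int is_render)
{
	int loc = bb_offset_slot(gfx_ver, is_render);

	/* Context images store reg-state as offset/value pairs, so the
	 * stale value sits one slot past the located offset. */
	if (loc != -1)
		regs[loc + 1] = 0;
}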
(ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_VD0_AUX_NV); else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); + cs = gen12_emit_aux_table_inv(ce->engine->gt, + cs, GEN12_VE0_AUX_NV); } return cs; diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 6ee8d1127016..7ecfa672f738 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -312,7 +312,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, ppgtt->vm.gt = gt; ppgtt->vm.i915 = i915; ppgtt->vm.dma = i915->drm.dev; - ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size); ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags; dma_resv_init(&ppgtt->vm._resv); diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index aa6aed837194..f3ad93db0b21 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -4,8 +4,10 @@ */ #include "i915_drv.h" +#include "i915_pci.h" #include "i915_reg.h" #include "intel_memory_region.h" +#include "intel_pci_config.h" #include "intel_region_lmem.h" #include "intel_region_ttm.h" #include "gem/i915_gem_lmem.h" @@ -45,7 +47,6 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size) drm_info(&i915->drm, "BAR%d resized to %dM\n", resno, 1 << bar_size); } -#define LMEM_BAR_NUM 2 static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -56,15 +57,14 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t u32 pci_cmd; int i; - current_size = roundup_pow_of_two(pci_resource_len(pdev, LMEM_BAR_NUM)); + current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR)); if (i915->params.lmem_bar_size) { u32 bar_sizes; rebar_size = i915->params.lmem_bar_size * (resource_size_t)SZ_1M; - bar_sizes = pci_rebar_get_possible_sizes(pdev, - LMEM_BAR_NUM); + bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR); if (rebar_size == current_size) return; @@ -107,7 +107,7 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY); - _resize_bar(i915, LMEM_BAR_NUM, rebar_size); + _resize_bar(i915, GEN12_LMEM_BAR, rebar_size); pci_assign_unassigned_bus_resources(pdev->bus); pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd); @@ -202,6 +202,9 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) if (!IS_DGFX(i915)) return ERR_PTR(-ENODEV); + if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) + return ERR_PTR(-ENXIO); + if (HAS_FLAT_CCS(i915)) { resource_size_t lmem_range; u64 tile_stolen, flat_ccs_base; @@ -236,8 +239,8 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) mul_u32_u32(i915->params.lmem_size, SZ_1M)); } - io_start = pci_resource_start(pdev, 2); - io_size = min(pci_resource_len(pdev, 2), lmem_size); + io_start = pci_resource_start(pdev, GEN12_LMEM_BAR); + io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size); if (!io_size) return ERR_PTR(-EIO); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 6fadde4ee7bf..6b86250c31ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ 
b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -194,9 +194,9 @@ static void rps_enable_interrupts(struct intel_rps *rps) rps_reset_ei(rps); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_enable_irq(gt, rps->pm_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); @@ -217,14 +217,14 @@ static void rps_reset_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); if (GRAPHICS_VER(gt->i915) >= 11) gen11_rps_reset_interrupts(rps); else gen6_rps_reset_interrupts(rps); rps->pm_iir = 0; - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void rps_disable_interrupts(struct intel_rps *rps) @@ -234,9 +234,9 @@ static void rps_disable_interrupts(struct intel_rps *rps) intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); @@ -1797,10 +1797,10 @@ static void rps_work(struct work_struct *work) int new_freq, adj, min, max; u32 pm_iir = 0; - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; client_boost = atomic_read(&rps->num_waiters); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ if (!pm_iir && !client_boost) @@ -1873,9 +1873,9 @@ static void rps_work(struct work_struct *work) mutex_unlock(&rps->lock); out: - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_unmask_irq(gt, rps->pm_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) @@ -1883,7 +1883,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) struct intel_gt *gt = rps_to_gt(rps); const u32 events = rps->pm_events & pm_iir; - lockdep_assert_held(&gt->irq_lock); + lockdep_assert_held(gt->irq_lock); if (unlikely(!events)) return; @@ -1903,7 +1903,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) events = pm_iir & rps->pm_events; if (events) { - spin_lock(&gt->irq_lock); + spin_lock(gt->irq_lock); GT_TRACE(gt, "irq events:%x\n", events); @@ -1911,7 +1911,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) rps->pm_iir |= events; schedule_work(&rps->work); - spin_unlock(&gt->irq_lock); + spin_unlock(gt->irq_lock); } if (GRAPHICS_VER(gt->i915) >= 8)
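The intel_rps.c hunk above is mechanical fallout from gt->irq_lock changing from an embedded spinlock into a pointer, so that every GT of a device serializes interrupt handling on one lock. A minimal sketch of the ownership model, assuming the root GT allocates the lock during early init (the helper name here is illustrative; the aliasing line is the one used by intel_sa_mediagt_setup() below):

/* Sketch only: the root GT owns the single per-device irq lock. */
static int example_root_irq_lock_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);

	gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock),
				    GFP_KERNEL);
	if (!gt->irq_lock)
		return -ENOMEM;
	spin_lock_init(gt->irq_lock);

	/* A standalone media GT later just aliases it:
	 *	media_gt->irq_lock = to_gt(i915)->irq_lock;
	 */
	return 0;
}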
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c new file mode 100644 index 000000000000..e8f3d18c12b8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <drm/drm_managed.h> + +#include "i915_drv.h" +#include "gt/intel_gt.h" +#include "gt/intel_sa_media.h" + +int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, + u32 gsi_offset) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore; + + uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL); + if (!uncore) + return -ENOMEM; + + uncore->gsi_offset = gsi_offset; + + gt->irq_lock = to_gt(i915)->irq_lock; + intel_gt_common_init_early(gt); + intel_uncore_init_early(uncore, gt); + + /* + * Standalone media shares the general MMIO space with the primary + * GT. We'll re-use the primary GT's mapping. + */ + uncore->regs = i915->uncore.regs; + if (drm_WARN_ON(&i915->drm, uncore->regs == NULL)) + return -EIO; + + gt->uncore = uncore; + gt->phys_addr = phys_addr; + + /* + * For current platforms we can assume there's only a single + * media GT and cache it for quick lookup. + */ + drm_WARN_ON(&i915->drm, i915->media_gt); + i915->media_gt = gt; + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.h b/drivers/gpu/drm/i915/gt/intel_sa_media.h new file mode 100644 index 000000000000..3afb310de932 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sa_media.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef __INTEL_SA_MEDIA__ +#define __INTEL_SA_MEDIA__ + +#include <linux/types.h> + +struct intel_gt; + +int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, + u32 gsi_offset); + +#endif /* __INTEL_SA_MEDIA_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index c6d3050604c8..66f21c735d54 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -382,7 +382,6 @@ static void cherryview_sseu_info_init(struct intel_gt *gt) static void gen9_sseu_info_init(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *info = mkwrite_device_info(i915); struct sseu_dev_info *sseu = &gt->info.sseu; struct intel_uncore *uncore = gt->uncore; u32 fuse2, eu_disable, subslice_mask; @@ -471,10 +470,10 @@ static void gen9_sseu_info_init(struct intel_gt *gt) if (IS_GEN9_LP(i915)) { #define IS_SS_DISABLED(ss) (!(sseu->subslice_mask.hsw[0] & BIT(ss))) - info->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3; + RUNTIME_INFO(i915)->has_pooled_eu = hweight8(sseu->subslice_mask.hsw[0]) == 3; sseu->min_eu_in_pool = 0; - if (info->has_pooled_eu) { + if (HAS_POOLED_EU(i915)) { if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) sseu->min_eu_in_pool = 3; else if (IS_SS_DISABLED(1))
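The intel_sseu.c hunk above shows the pattern this series applies throughout: values that are only known after probing the hardware move out of the write-once intel_device_info and into the mutable intel_runtime_info. A condensed sketch of the resulting flow, based on the i915_driver_create() and gen9_sseu_info_init() hunks in this diff (the wrapper function itself is hypothetical):

/* Hypothetical wrapper around the two steps shown in this diff. */
static void example_runtime_info_flow(struct drm_i915_private *i915,
				      struct pci_dev *pdev)
{
	struct intel_runtime_info *runtime = RUNTIME_INFO(i915);

	/* 1) At probe, seed the mutable copy from the const defaults. */
	memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
	runtime->device_id = pdev->device;

	/* 2) Later, fuse/probe code adjusts the runtime copy in place. */
	runtime->has_pooled_eu = false;	/* cf. gen9_sseu_info_init() */
}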
Rather + * than include a table (see i915_cmd_parser.c) of all + * the possible commands and their instruction lengths + * (or mask for variable length instructions), assume + * we have gathered the complete list of registers and + * bail out. + */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { + /* Assume all other MI commands match LRI length mask */ dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + ce->engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; while (len--) { @@ -1150,18 +1180,29 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison) hw = defaults; hw += LRC_STATE_OFFSET / sizeof(*hw); do { - u32 len = hw[dw] & 0x7f; + u32 len = hw[dw] & LRI_LENGTH_MASK; + + /* For simplicity, break parsing at the first complex command */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + ce->engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; *cs++ = MI_LOAD_REGISTER_IMM(len); @@ -1292,18 +1333,29 @@ static int compare_isolation(struct intel_engine_cs *engine, hw = defaults; hw += LRC_STATE_OFFSET / sizeof(*hw); do { - u32 len = hw[dw] & 0x7f; + u32 len = hw[dw] & LRI_LENGTH_MASK; + + /* For simplicity, break parsing at the first complex command */ + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) + break; if (hw[dw] == 0) { dw++; continue; } - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { dw += len + 2; continue; } + if (!len) { + pr_err("%s: invalid LRI found in context image\n", + engine->name); + igt_hexdump(defaults, PAGE_SIZE); + break; + } + dw++; len = (len + 1) / 2; while (len--) { @@ -1343,6 +1395,30 @@ err_A0: return err; } +static struct i915_vma * +create_result_vma(struct i915_address_space *vm, unsigned long sz) +{ + struct i915_vma *vma; + void *ptr; + + vma = create_user_vma(vm, sz); + if (IS_ERR(vma)) + return vma; + + /* Set the results to a known value distinct from the poison */ + ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); + if (IS_ERR(ptr)) { + i915_vma_put(vma); + return ERR_CAST(ptr); + } + + memset(ptr, POISON_INUSE, vma->size); + i915_gem_object_flush_map(vma->obj); + i915_gem_object_unpin_map(vma->obj); + + return vma; +} + static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) { u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); @@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) goto err_A; } - ref[0] = create_user_vma(A->vm, SZ_64K); + ref[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[0])) { err = PTR_ERR(ref[0]); goto err_B; } - ref[1] = create_user_vma(A->vm, SZ_64K); + ref[1] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[1])) { err = PTR_ERR(ref[1]); goto err_ref0; @@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) } i915_request_put(rq); - result[0] = create_user_vma(A->vm, SZ_64K); + result[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(result[0])) { err = PTR_ERR(result[0]); goto 
static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) { u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); @@ -1361,13 +1437,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) goto err_A; } - ref[0] = create_user_vma(A->vm, SZ_64K); + ref[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[0])) { err = PTR_ERR(ref[0]); goto err_B; } - ref[1] = create_user_vma(A->vm, SZ_64K); + ref[1] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(ref[1])) { err = PTR_ERR(ref[1]); goto err_ref0; @@ -1389,13 +1465,13 @@ static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) } i915_request_put(rq); - result[0] = create_user_vma(A->vm, SZ_64K); + result[0] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(result[0])) { err = PTR_ERR(result[0]); goto err_ref1; } - result[1] = create_user_vma(A->vm, SZ_64K); + result[1] = create_result_vma(A->vm, SZ_64K); if (IS_ERR(result[1])) { err = PTR_ERR(result[1]); goto err_result0; @@ -1408,18 +1484,17 @@ } err = poison_registers(B, poison, sema); - if (err) { - WRITE_ONCE(*sema, -1); - i915_request_put(rq); - goto err_result1; - } - - if (i915_request_wait(rq, 0, HZ / 2) < 0) { - i915_request_put(rq); + if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) { + pr_err("%s(%s): wait for results timed out\n", + __func__, engine->name); err = -ETIME; - goto err_result1; } + + /* Always cancel the semaphore wait, just in case the GPU gets stuck */ + WRITE_ONCE(*sema, -1); i915_request_put(rq); + if (err) + goto err_result1; err = compare_isolation(engine, ref, result, A, poison); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 24451d000a6a..bac06e3d6f2c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -82,9 +82,9 @@ static void gen9_reset_guc_interrupts(struct intel_guc *guc) assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen9_enable_guc_interrupts(struct intel_guc *guc) @@ -93,11 +93,11 @@ assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & gt->pm_guc_events); gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen9_disable_guc_interrupts(struct intel_guc *guc) @@ -106,11 +106,11 @@ assert_rpm_wakelock_held(&gt->i915->runtime_pm); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); gen9_reset_guc_interrupts(guc); @@ -120,9 +120,9 @@ static void gen11_reset_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen11_enable_guc_interrupts(struct intel_guc *guc) @@ -130,25 +130,25 @@ struct intel_gt *gt = guc_to_gt(guc); u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); } static void gen11_disable_guc_interrupts(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); gen11_reset_guc_interrupts(guc);
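The __lrc_isolation() rework above folds the two error paths into one: wait on the results request with a timeout, then unconditionally break the semaphore so a stuck GPU cannot leave the poison batch spinning, and only then branch on the error. The shape of that pattern, condensed (illustrative, not a copy of the test):

/* Sketch: wait with a timeout, then always release the semaphore. */
static int example_wait_then_release(struct i915_request *rq, u32 *sema)
{
	int err = 0;

	if (i915_request_wait(rq, 0, HZ / 2) < 0)
		err = -ETIME;

	/* Runs on every path, success or failure, as in the hunk above. */
	WRITE_ONCE(*sema, -1);
	i915_request_put(rq);

	return err;
}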
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index b071973ac41c..55d3ef93e86f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -36,24 +36,6 @@ struct guc_log_section { const char *name; }; -static s32 scale_log_param(struct intel_guc_log *log, const struct guc_log_section *section, - s32 param) -{ - /* -1 means default */ - if (param < 0) - return section->default_val; - - /* Check for 32-bit overflow */ - if (param >= SZ_4K) { - drm_err(&guc_to_gt(log_to_guc(log))->i915->drm, "Size too large for GuC %s log: %dMB!", - section->name, param); - return section->default_val; - } - - /* Param units are 1MB */ - return param * SZ_1M; -} - static void _guc_log_init_sizes(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); @@ -78,15 +60,10 @@ static void _guc_log_init_sizes(struct intel_guc_log *log) "capture", } }; - s32 params[GUC_LOG_SECTIONS_LIMIT] = { - GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE / SZ_1M, - GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE / SZ_1M, - GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE / SZ_1M, - }; int i; for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) - log->sizes[i].bytes = scale_log_param(log, sections + i, params[i]); + log->sizes[i].bytes = sections[i].default_val; /* If debug size > 1MB then bump default crash size to keep the same units */ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M && diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 64c4e83153f4..22ba66e48a9b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt) if (!guc_submission_initialized(guc)) return; - cancel_delayed_work(&guc->timestamp.work); + /* + * There is a race with suspend flow where the worker runs after suspend + * and causes an unclaimed register access warning. Cancel the worker + * synchronously here. + */ + cancel_delayed_work_sync(&guc->timestamp.work); /* * Before parking, we should sample engine busyness stats if we need to. @@ -1532,8 +1537,8 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ - spin_lock_irq(&guc_to_gt(guc)->irq_lock); - spin_unlock_irq(&guc_to_gt(guc)->irq_lock); + spin_lock_irq(guc_to_gt(guc)->irq_lock); + spin_unlock_irq(guc_to_gt(guc)->irq_lock); guc_flush_submissions(guc); guc_flush_destroyed_contexts(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index abf4e142596d..dbd048b77e19 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -245,9 +245,9 @@ static int guc_enable_communication(struct intel_guc *guc) intel_guc_enable_interrupts(guc); /* check for CT messages received before we enabled interrupts */ - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); intel_guc_ct_event_handler(&guc->ct); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); drm_dbg(&i915->drm, "GuC communication enabled\n"); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index af425916cdf6..b91ad4aede1f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -72,12 +72,14 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * security fixes, etc. to be enabled. 
*/ #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \ - fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \ + fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \ + fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \ fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \ + fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \ - fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \ + fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \ fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \ @@ -92,9 +94,11 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1)) #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \ + fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \ fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \ + fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \ fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \ - fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \ + fw_def(DG1, 0, huc_raw(dg1)) \ fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \ @@ -232,6 +236,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) u32 fw_count; u8 rev = INTEL_REVID(i915); int i; + bool found; /* * The only difference between the ADL GuC FWs is the HWConfig support. @@ -246,6 +251,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) fw_blobs = blobs_all[uc_fw->type].blobs; fw_count = blobs_all[uc_fw->type].count; + found = false; for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) { const struct uc_fw_blob *blob = &fw_blobs[i].blob; @@ -266,9 +272,15 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) uc_fw->file_wanted.path = blob->path; uc_fw->file_wanted.major_ver = blob->major; uc_fw->file_wanted.minor_ver = blob->minor; + found = true; break; } + if (!found && uc_fw->file_selected.path) { + /* Failed to find a match for the last attempt?! */ + uc_fw->file_selected.path = NULL; + } + /* make sure the list is ordered as expected */ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) { verified = true; @@ -322,7 +334,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) continue; bad: - drm_err(&i915->drm, "\x1B[35;1mInvalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", + drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev, fw_blobs[i - 1].blob.legacy ? "L" : "v", fw_blobs[i - 1].blob.major, @@ -553,10 +565,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev); memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); - if (!err || intel_uc_fw_is_overridden(uc_fw)) - goto done; + + /* Any error is terminal if overriding. 
Don't bother searching for older versions */ + if (err && intel_uc_fw_is_overridden(uc_fw)) + goto fail; while (err == -ENOENT) { + old_ver = true; + __uc_fw_auto_select(i915, uc_fw); if (!uc_fw->file_selected.path) { /* @@ -576,8 +592,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) if (err) goto fail; - old_ver = true; -done: if (uc_fw->loaded_via_gsc) err = check_gsc_manifest(fw, uc_fw); else diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index dad3a6054335..eef3bba8a41b 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -33,6 +33,7 @@ #include "i915_drv.h" #include "gvt.h" +#include "intel_pci_config.h" enum { INTEL_GVT_PCI_BAR_GTTMMIO = 0, @@ -353,9 +354,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = - pci_resource_len(pdev, 0); + pci_resource_len(pdev, GTTMMADR_BAR); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = - pci_resource_len(pdev, 2); + pci_resource_len(pdev, GTT_APERTURE_BAR); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index a30ba2d7b7ba..1b509c1a1e33 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -32,9 +32,10 @@ * */ +#include "display/intel_gmbus_regs.h" +#include "gvt.h" #include "i915_drv.h" #include "i915_reg.h" -#include "gvt.h" #define GMBUS1_TOTAL_BYTES_SHIFT 16 #define GMBUS1_TOTAL_BYTES_MASK 0x1ff diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index beea5895e499..e7e33f95cc51 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -498,7 +498,7 @@ static u32 bdw_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) switch (wrpll_ctl & WRPLL_REF_MASK) { case WRPLL_REF_PCH_SSC: - refclk = vgpu->gvt->gt->i915->dpll.ref_clks.ssc; + refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.ssc; break; case WRPLL_REF_LCPLL: refclk = 2700000; @@ -529,7 +529,7 @@ out: static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port) { u32 dp_br = 0; - int refclk = vgpu->gvt->gt->i915->dpll.ref_clks.nssc; + int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; struct dpll clock = {0}; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3a8450058548..ae987e92251d 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -66,8 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data) seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); - intel_device_info_print_static(INTEL_INFO(i915), &p); - intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); i915_print_iommu_status(i915, &p); intel_gt_info_print(&to_gt(i915)->info, &p); intel_driver_caps_print(&i915->caps, &p); @@ -411,7 +410,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y)); - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + if (dev_priv->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); /* On BDW+, swizzling is not used. 
See detect_bit_6_swizzle() */ @@ -493,7 +492,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_puts(m, "Runtime power management not supported\n"); seq_printf(m, "Runtime power status: %s\n", - str_enabled_disabled(!dev_priv->power_domains.init_wakeref)); + str_enabled_disabled(!dev_priv->display.power.domains.init_wakeref)); seq_printf(m, "GPU idle: %s\n", str_yes_no(!to_gt(dev_priv)->awake)); seq_printf(m, "IRQs disabled: %s\n", diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index deb8a8b76965..c459eb362c47 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -61,6 +61,7 @@ #include "display/intel_pps.h" #include "display/intel_sprite.h" #include "display/intel_vga.h" +#include "display/skl_watermark.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_create.h" @@ -105,6 +106,12 @@ static const char irst_name[] = "INT3392"; static const struct drm_driver i915_drm_driver; +static void i915_release_bridge_dev(struct drm_device *dev, + void *bridge) +{ + pci_dev_put(bridge); +} + static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus); @@ -115,7 +122,9 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "bridge device not found\n"); return -EIO; } - return 0; + + return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev, + dev_priv->bridge_dev); } /* Allocate space for the MCH regs if needed, return nonzero on error */ @@ -252,8 +261,8 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv) if (dev_priv->wq == NULL) goto out_err; - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) + dev_priv->display.hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->display.hotplug.dp_wq == NULL) goto out_free_wq; return 0; @@ -268,7 +277,7 @@ out_err: static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) { - destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->display.hotplug.dp_wq); destroy_workqueue(dev_priv->wq); } @@ -302,8 +311,13 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) static void sanitize_gpu(struct drm_i915_private *i915) { - if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) - __intel_gt_reset(to_gt(i915), ALL_ENGINES); + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) { + struct intel_gt *gt; + unsigned int i; + + for_each_gt(gt, i915, i) + __intel_gt_reset(gt, ALL_ENGINES); + } } /** @@ -326,19 +340,19 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_device_info_subplatform_init(dev_priv); intel_step_init(dev_priv); - intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); + intel_uncore_mmio_debug_init_early(dev_priv); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->backlight_lock); + mutex_init(&dev_priv->display.backlight.lock); mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->audio.mutex); - mutex_init(&dev_priv->wm.wm_mutex); - mutex_init(&dev_priv->pps_mutex); - mutex_init(&dev_priv->hdcp_comp_mutex); + mutex_init(&dev_priv->display.audio.mutex); + mutex_init(&dev_priv->display.wm.wm_mutex); + mutex_init(&dev_priv->display.pps.mutex); + mutex_init(&dev_priv->display.hdcp.comp_mutex); i915_memcpy_init_early(dev_priv); 
intel_runtime_pm_init_early(&dev_priv->runtime_pm); @@ -357,7 +371,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_wopcm_init_early(&dev_priv->wopcm); - intel_root_gt_init_early(dev_priv); + ret = intel_root_gt_init_early(dev_priv); + if (ret < 0) + goto err_rootgt; i915_drm_clients_init(&dev_priv->clients, dev_priv); @@ -382,6 +398,7 @@ err_gem: i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release_all(dev_priv); i915_drm_clients_fini(&dev_priv->clients); +err_rootgt: intel_region_ttm_device_fini(dev_priv); err_ttm: vlv_suspend_cleanup(dev_priv); @@ -423,7 +440,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) */ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) { - int ret; + struct intel_gt *gt; + int ret, i; if (i915_inject_probe_failure(dev_priv)) return -ENODEV; @@ -432,17 +450,27 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret < 0) return ret; - ret = intel_uncore_init_mmio(&dev_priv->uncore); - if (ret) - return ret; + for_each_gt(gt, dev_priv, i) { + ret = intel_uncore_init_mmio(gt->uncore); + if (ret) + return ret; + + ret = drmm_add_action_or_reset(&dev_priv->drm, + intel_uncore_fini_mmio, + gt->uncore); + if (ret) + return ret; + } /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); - ret = intel_gt_init_mmio(to_gt(dev_priv)); - if (ret) - goto err_uncore; + for_each_gt(gt, dev_priv, i) { + ret = intel_gt_init_mmio(gt); + if (ret) + goto err_uncore; + } /* As early as possible, scrub existing GPU state before clobbering */ sanitize_gpu(dev_priv); @@ -451,8 +479,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) err_uncore: intel_teardown_mchbar(dev_priv); - intel_uncore_fini_mmio(&dev_priv->uncore); - pci_dev_put(dev_priv->bridge_dev); return ret; } @@ -464,8 +490,6 @@ err_uncore: static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); - intel_uncore_fini_mmio(&dev_priv->uncore); - pci_dev_put(dev_priv->bridge_dev); } /** @@ -715,6 +739,8 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) static void i915_driver_register(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; + struct intel_gt *gt; + unsigned int i; i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); @@ -734,7 +760,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Depends on sysfs having been initialized */ i915_perf_register(dev_priv); - intel_gt_driver_register(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_register(gt); intel_display_driver_register(dev_priv); @@ -753,6 +780,9 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { + struct intel_gt *gt; + unsigned int i; + i915_switcheroo_unregister(dev_priv); intel_unregister_dsm_handler(); @@ -762,7 +792,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) intel_display_driver_unregister(dev_priv); - intel_gt_driver_unregister(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_driver_unregister(gt); i915_perf_unregister(dev_priv); i915_pmu_unregister(dev_priv); @@ -784,6 +815,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { struct drm_printer p = drm_debug_printer("i915 device info:"); + struct 
intel_gt *gt; + unsigned int i; drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", INTEL_DEVID(dev_priv), @@ -793,10 +826,11 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) INTEL_INFO(dev_priv)->platform), GRAPHICS_VER(dev_priv)); - intel_device_info_print_static(INTEL_INFO(dev_priv), &p); - intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + intel_device_info_print(INTEL_INFO(dev_priv), + RUNTIME_INFO(dev_priv), &p); i915_print_iommu_status(dev_priv, &p); - intel_gt_info_print(&to_gt(dev_priv)->info, &p); + for_each_gt(gt, dev_priv, i) + intel_gt_info_print(&gt->info, &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -814,6 +848,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; struct intel_device_info *device_info; + struct intel_runtime_info *runtime; struct drm_i915_private *i915; i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, @@ -829,7 +864,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the write-once "constant" device info */ device_info = mkwrite_device_info(i915); memcpy(device_info, match_info, sizeof(*device_info)); - RUNTIME_INFO(i915)->device_id = pdev->device; + + /* Initialize initial runtime info from static const data and pdev. */ + runtime = RUNTIME_INFO(i915); + memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime)); + runtime->device_id = pdev->device; return i915; } @@ -948,7 +987,9 @@ out_fini: void i915_driver_remove(struct drm_i915_private *i915) { - disable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_driver_unregister(i915); @@ -972,18 +1013,19 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_driver_hw_remove(i915); - enable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static void i915_driver_release(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + intel_wakeref_t wakeref; if (!dev_priv->do_release) return; - disable_rpm_wakeref_asserts(rpm); + wakeref = intel_runtime_pm_get(rpm); i915_gem_driver_release(dev_priv); @@ -994,7 +1036,8 @@ static void i915_driver_release(struct drm_device *dev) i915_driver_mmio_release(dev_priv); - enable_rpm_wakeref_asserts(rpm); + intel_runtime_pm_put(rpm, wakeref); + intel_runtime_pm_driver_release(rpm); i915_driver_late_release(dev_priv); @@ -1206,13 +1249,15 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; disable_rpm_wakeref_asserts(rpm); i915_gem_suspend_late(dev_priv); - intel_uncore_suspend(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_suspend(gt->uncore); intel_power_domains_suspend(dev_priv, get_suspend_mode(dev_priv, hibernation));
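i915_driver_remove() and i915_driver_release() above now take a real runtime-PM wakeref for the duration of teardown instead of merely disabling the wakeref asserts, which keeps the device demonstrably awake while hardware is still being touched. The pattern, reduced to its core (a sketch; the teardown body is elided):

/* Sketch of the get/put bracket the two functions above switch to. */
static void example_teardown_with_wakeref(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* ... teardown that may perform MMIO goes here ... */

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}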
drm_err(&dev_priv->drm, "Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_resume_early(&dev_priv->uncore); - - intel_gt_check_and_clear_faults(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) { + intel_uncore_resume_early(gt->uncore); + intel_gt_check_and_clear_faults(gt); + } intel_display_power_resume_early(dev_priv); @@ -1580,7 +1627,8 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; @@ -1595,11 +1643,13 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_gt_runtime_suspend(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_suspend(gt); intel_runtime_pm_disable_interrupts(dev_priv); - intel_uncore_suspend(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_suspend(gt->uncore); intel_display_power_suspend(dev_priv); @@ -1663,7 +1713,8 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret; + struct intel_gt *gt; + int ret, i; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; @@ -1683,7 +1734,8 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); - intel_uncore_runtime_resume(&dev_priv->uncore); + for_each_gt(gt, dev_priv, i) + intel_uncore_runtime_resume(gt->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1691,7 +1743,8 @@ static int intel_runtime_resume(struct device *kdev) * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). 
*/ - intel_gt_runtime_resume(to_gt(dev_priv)); + for_each_gt(gt, dev_priv, i) + intel_gt_runtime_resume(gt); /* * On VLV/CHV display interrupts are part of the display @@ -1703,7 +1756,7 @@ static int intel_runtime_resume(struct device *kdev) intel_hpd_poll_disable(dev_priv); } - intel_enable_ipc(dev_priv); + skl_watermark_ipc_update(dev_priv); enable_rpm_wakeref_asserts(rpm); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7c0a34a33fec..bdc81db76dbd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -34,20 +34,10 @@ #include <linux/pm_qos.h> -#include <drm/drm_connector.h> #include <drm/ttm/ttm_device.h> -#include "display/intel_cdclk.h" #include "display/intel_display.h" -#include "display/intel_display_power.h" -#include "display/intel_dmc.h" -#include "display/intel_dpll_mgr.h" -#include "display/intel_dsb.h" -#include "display/intel_fbc.h" -#include "display/intel_frontbuffer.h" -#include "display/intel_global_state.h" -#include "display/intel_gmbus.h" -#include "display/intel_opregion.h" +#include "display/intel_display_core.h" #include "gem/i915_gem_context_types.h" #include "gem/i915_gem_lmem.h" @@ -70,80 +60,24 @@ #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" -#include "intel_pm_types.h" #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" #include "intel_wopcm.h" -struct dpll; struct drm_i915_clock_gating_funcs; struct drm_i915_gem_object; struct drm_i915_private; -struct intel_atomic_state; -struct intel_audio_funcs; -struct intel_cdclk_config; -struct intel_cdclk_funcs; -struct intel_cdclk_state; -struct intel_cdclk_vals; -struct intel_color_funcs; struct intel_connector; -struct intel_crtc; struct intel_dp; -struct intel_dpll_funcs; struct intel_encoder; -struct intel_fbdev; -struct intel_fdi_funcs; -struct intel_gmbus; -struct intel_hotplug_funcs; -struct intel_initial_plane_config; struct intel_limit; -struct intel_overlay; struct intel_overlay_error_state; struct vlv_s0ix_state; /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 -struct i915_hotplug { - struct delayed_work hotplug_work; - - const u32 *hpd, *pch_hpd; - - struct { - unsigned long last_jiffies; - int count; - enum { - HPD_ENABLED = 0, - HPD_DISABLED = 1, - HPD_MARK_DISABLED = 2 - } state; - } stats[HPD_NUM_PINS]; - u32 event_bits; - u32 retry_bits; - struct delayed_work reenable_work; - - u32 long_port_mask; - u32 short_port_mask; - struct work_struct dig_port_work; - - struct work_struct poll_init_work; - bool poll_enabled; - - unsigned int hpd_storm_threshold; - /* Whether or not to count short HPD IRQs in HPD storms */ - u8 hpd_short_storm_enabled; - - /* - * if we get a HPD irq from DP and a HPD irq from non-DP - * the non-DP HPD could block the workqueue on a mode config - * mutex getting, that userspace may have taken. However - * userspace is waiting on the DP workqueue to run which is - * blocked behind the non-DP one. - */ - struct workqueue_struct *dp_wq; -}; - #define I915_GEM_GPU_DOMAINS \ (I915_GEM_DOMAIN_RENDER | \ I915_GEM_DOMAIN_SAMPLER | \ @@ -151,55 +85,9 @@ struct i915_hotplug { I915_GEM_DOMAIN_INSTRUCTION | \ I915_GEM_DOMAIN_VERTEX) -struct sdvo_device_mapping { - u8 initialized; - u8 dvo_port; - u8 slave_addr; - u8 dvo_wiring; - u8 i2c_pin; - u8 ddc_pin; -}; - -/* functions used for watermark calcs for display. 
*/ -struct drm_i915_wm_disp_funcs { - /* update_wm is for legacy wm management */ - void (*update_wm)(struct drm_i915_private *dev_priv); - int (*compute_pipe_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_intermediate_wm)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*initial_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*atomic_update_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*optimize_watermarks)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - int (*compute_global_watermarks)(struct intel_atomic_state *state); -}; - -struct drm_i915_display_funcs { - /* Returns the active state of the crtc, and if the crtc is active, - * fills out the pipe-config with the hw state. */ - bool (*get_pipe_config)(struct intel_crtc *, - struct intel_crtc_state *); - void (*get_initial_plane_config)(struct intel_crtc *, - struct intel_initial_plane_config *); - void (*crtc_enable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*crtc_disable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*commit_modeset_enables)(struct intel_atomic_state *state); -}; - #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ -#define QUIRK_LVDS_SSC_DISABLE (1<<1) -#define QUIRK_INVERT_BRIGHTNESS (1<<2) -#define QUIRK_BACKLIGHT_PRESENT (1<<3) -#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) -#define QUIRK_INCREASE_T12_DELAY (1<<6) -#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7) -#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) +#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0) struct i915_suspend_saved_registers { u32 saveDSPARB; @@ -289,51 +177,8 @@ i915_fence_timeout(const struct drm_i915_private *i915) return i915_fence_context_timeout(i915, U64_MAX); } -/* Amount of SAGV/QGV points, BSpec precisely defines this */ -#define I915_NUM_QGV_POINTS 8 - #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915)) -/* Amount of PSF GV points, BSpec precisely defines this */ -#define I915_NUM_PSF_GV_POINTS 3 - -struct intel_vbt_data { - /* bdb version */ - u16 version; - - /* Feature bits */ - unsigned int int_tv_support:1; - unsigned int int_crt_support:1; - unsigned int lvds_use_ssc:1; - unsigned int int_lvds_support:1; - unsigned int display_clock_mode:1; - unsigned int fdi_rx_polarity_inverted:1; - int lvds_ssc_freq; - enum drm_panel_orientation orientation; - - bool override_afc_startup; - u8 override_afc_startup_val; - - int crt_ddc_pin; - - struct list_head display_devices; - struct list_head bdb_blocks; - - struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ - struct sdvo_device_mapping sdvo_mappings[2]; -}; - -struct i915_frontbuffer_tracking { - spinlock_t lock; - - /* - * Tracking bits for delayed frontbuffer flushing du to gpu activity or - * scheduled flips. 
- */ - unsigned busy_bits; - unsigned flip_bits; -}; - struct i915_virtual_gpu { struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; @@ -348,32 +193,11 @@ struct i915_selftest_stash { struct ida mock_region_instances; }; -/* intel_audio.c private */ -struct intel_audio_private { - /* Display internal audio functions */ - const struct intel_audio_funcs *funcs; - - /* hda/i915 audio component */ - struct i915_audio_component *component; - bool component_registered; - /* mutex for audio/video sync */ - struct mutex mutex; - int power_refcount; - u32 freq_cntrl; - - /* Used to save the pipe-to-encoder mapping for audio */ - struct intel_encoder *encoder_map[I915_MAX_PIPES]; - - /* necessary resource sharing with HDMI LPE audio driver. */ - struct { - struct platform_device *platdev; - int irq; - } lpe; -}; - struct drm_i915_private { struct drm_device drm; + struct intel_display display; + /* FIXME: Device release actions should all be moved to drmm_ */ bool do_release; @@ -417,27 +241,6 @@ struct drm_i915_private { struct intel_wopcm wopcm; - struct intel_dmc dmc; - - struct intel_gmbus *gmbus[GMBUS_NUM_PINS]; - - /** gmbus_mutex protects against concurrent usage of the single hw gmbus - * controller on different i2c buses. */ - struct mutex gmbus_mutex; - - /** - * Base address of where the gmbus and gpio blocks are located (either - * on PCH or on SoC for platforms without PCH). - */ - u32 gpio_mmio_base; - - /* MMIO base address for MIPI regs */ - u32 mipi_mmio_base; - - u32 pps_mmio_base; - - wait_queue_head_t gmbus_wait_queue; - struct pci_dev *bridge_dev; struct rb_root uabi_engines; @@ -461,48 +264,15 @@ struct drm_i915_private { }; u32 pipestat_irq_mask[I915_MAX_PIPES]; - struct i915_hotplug hotplug; - struct intel_fbc *fbc[I915_MAX_FBCS]; - struct intel_opregion opregion; - struct intel_vbt_data vbt; - bool preserve_bios_swizzle; - /* overlay */ - struct intel_overlay *overlay; - - /* backlight registers and fields in struct intel_panel */ - struct mutex backlight_lock; - - /* protects panel power sequencer state */ - struct mutex pps_mutex; - unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_preferred_vco_freq; - unsigned int max_cdclk_freq; unsigned int max_dotclk_freq; unsigned int hpll_freq; - unsigned int fdi_pll_freq; unsigned int czclk_freq; - struct { - /* The current hardware cdclk configuration */ - struct intel_cdclk_config hw; - - /* cdclk, divider, and ratio table from bspec */ - const struct intel_cdclk_vals *table; - - struct intel_global_obj obj; - } cdclk; - - struct { - /* The current hardware dbuf configuration */ - u8 enabled_slices; - - struct intel_global_obj obj; - } dbuf; - /** * wq - Driver workqueue for GEM. 
* @@ -512,40 +282,14 @@ struct drm_i915_private { */ struct workqueue_struct *wq; - /* ordered wq for modesets */ - struct workqueue_struct *modeset_wq; - /* unbound hipri wq for page flips/plane updates */ - struct workqueue_struct *flip_wq; - /* pm private clock gating functions */ const struct drm_i915_clock_gating_funcs *clock_gating_funcs; - /* pm display functions */ - const struct drm_i915_wm_disp_funcs *wm_disp; - - /* irq display functions */ - const struct intel_hotplug_funcs *hotplug_funcs; - - /* fdi display functions */ - const struct intel_fdi_funcs *fdi_funcs; - - /* display pll funcs */ - const struct intel_dpll_funcs *dpll_funcs; - - /* Display functions */ - const struct drm_i915_display_funcs *display; - - /* Display internal color functions */ - const struct intel_color_funcs *color_funcs; - - /* Display CDCLK functions */ - const struct intel_cdclk_funcs *cdclk_funcs; - /* PCH chipset type */ enum intel_pch pch_type; unsigned short pch_id; - unsigned long quirks; + unsigned long gem_quirks; struct drm_atomic_state *modeset_restore_state; struct drm_modeset_acquire_ctx reset_ctx; @@ -554,34 +298,8 @@ struct drm_i915_private { /* Kernel Modesetting */ - /** - * dpll and cdclk state is protected by connection_mutex - * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. - * Must be global rather than per dpll, because on some platforms plls - * share registers. - */ - struct { - struct mutex lock; - - int num_shared_dpll; - struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; - const struct intel_dpll_mgr *mgr; - - struct { - int nssc; - int ssc; - } ref_clks; - } dpll; - struct list_head global_obj_list; - struct i915_frontbuffer_tracking fb_tracking; - - struct intel_atomic_helper { - struct llist_head free_list; - struct work_struct free_work; - } atomic_helper; - bool mchbar_need_disable; struct intel_l3_parity l3_parity; @@ -600,21 +318,8 @@ struct drm_i915_private { */ u32 edram_size_mb; - struct i915_power_domains power_domains; - struct i915_gpu_error gpu_error; - /* list of fbdev register on this device */ - struct intel_fbdev *fbdev; - struct work_struct fbdev_suspend_work; - - struct drm_property *broadcast_rgb_property; - struct drm_property *force_audio_property; - - u32 fdi_rx_config; - - /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ - u32 chv_phy_control; /* * Shadows for CHV DPLL_MD regs to keep the state * checker somewhat working in the presence hardware @@ -627,51 +332,6 @@ struct drm_i915_private { struct i915_suspend_saved_registers regfile; struct vlv_s0ix_state *vlv_s0ix_state; - enum { - I915_SAGV_UNKNOWN = 0, - I915_SAGV_DISABLED, - I915_SAGV_ENABLED, - I915_SAGV_NOT_CONTROLLED - } sagv_status; - - u32 sagv_block_time_us; - - struct { - /* - * Raw watermark latency values: - * in 0.1us units for WM0, - * in 0.5us units for WM1+. - */ - /* primary */ - u16 pri_latency[5]; - /* sprite */ - u16 spr_latency[5]; - /* cursor */ - u16 cur_latency[5]; - /* - * Raw watermark memory latency values - * for SKL for all 8 levels - * in 1us units. - */ - u16 skl_latency[8]; - - /* current hardware state */ - union { - struct ilk_wm_values hw; - struct vlv_wm_values vlv; - struct g4x_wm_values g4x; - }; - - u8 max_level; - - /* - * Should be held around atomic WM register writing; also - * protects * intel_crtc->wm.active and - * crtc_state->wm.need_postvbl_update. 
- */ - struct mutex wm_mutex; - } wm; - struct dram_info { bool wm_lv_0_adjust_needed; u8 num_channels; @@ -689,18 +349,6 @@ struct drm_i915_private { u8 num_psf_gv_points; } dram_info; - struct intel_bw_info { - /* for each QGV point */ - unsigned int deratedbw[I915_NUM_QGV_POINTS]; - /* for each PSF GV point */ - unsigned int psf_bw[I915_NUM_PSF_GV_POINTS]; - u8 num_qgv_points; - u8 num_psf_gv_points; - u8 num_planes; - } max_bw[6]; - - struct intel_global_obj bw_obj; - struct intel_runtime_pm runtime_pm; struct i915_perf perf; @@ -716,6 +364,9 @@ struct drm_i915_private { struct kobject *sysfs_gt; + /* Quick lookup of media GT (current platforms only have one) */ + struct intel_gt *media_gt; + struct { struct i915_gem_contexts { spinlock_t lock; /* locks list */ @@ -733,9 +384,6 @@ struct drm_i915_private { struct file *mmap_singleton; } gem; - /* Window2 specifies time required to program DSB (Window2) in number of scan lines */ - u8 window2_delay; - u8 pch_ssc_use; /* For i915gm/i945gm vblank irq workaround */ @@ -743,31 +391,16 @@ struct drm_i915_private { bool irq_enabled; - union { - /* perform PHY state sanity checks? */ - bool chv_phy_assert[2]; - - /* - * DG2: Mask of PHYs that were not calibrated by the firmware - * and should not be used. - */ - u8 snps_phy_failed_calibration; - }; - - bool ipc_enabled; - - struct intel_audio_private audio; + /* + * DG2: Mask of PHYs that were not calibrated by the firmware + * and should not be used. + */ + u8 snps_phy_failed_calibration; struct i915_pmu pmu; struct i915_drm_clients clients; - struct i915_hdcp_comp_master *hdcp_master; - bool hdcp_comp_added; - - /* Mutex to protect the above hdcp component related values. */ - struct mutex hdcp_comp_mutex; - /* The TTM device structure. */ struct ttm_device bdev; @@ -826,28 +459,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) (engine__) && (engine__)->uabi_class == (class__); \ (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) -#define I915_GTT_OFFSET_NONE ((u32)-1) - -/* - * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is - * considered to be the frontbuffer for the given plane interface-wise. This - * doesn't mean that the hw necessarily already scans it out, but that any - * rendering (by the cpu or gpu) will land in the frontbuffer eventually. - * - * We have one bit per pipe and per scanout plane type. 
- */ -#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 -#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ - BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ - BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ - BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ -}) -#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ - BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) -#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ - GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ - INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) - #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) @@ -856,19 +467,19 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915) #define IP_VER(ver, rel) ((ver) << 8 | (rel)) -#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) -#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ - INTEL_INFO(i915)->graphics.rel) +#define GRAPHICS_VER(i915) (RUNTIME_INFO(i915)->graphics.ip.ver) +#define GRAPHICS_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \ + RUNTIME_INFO(i915)->graphics.ip.rel) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) -#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) -#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.ver, \ - INTEL_INFO(i915)->media.rel) +#define MEDIA_VER(i915) (RUNTIME_INFO(i915)->media.ip.ver) +#define MEDIA_VER_FULL(i915) IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \ + RUNTIME_INFO(i915)->media.ip.rel) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) -#define DISPLAY_VER(i915) (INTEL_INFO(i915)->display.ver) +#define DISPLAY_VER(i915) (RUNTIME_INFO(i915)->display.ip.ver) #define IS_DISPLAY_VER(i915, from, until) \ (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until)) @@ -1210,7 +821,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) -#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) +#define INTEL_PPGTT(dev_priv) (RUNTIME_INFO(dev_priv)->ppgtt_type) #define HAS_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) #define HAS_FULL_PPGTT(dev_priv) \ @@ -1218,7 +829,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ GEM_BUG_ON((sizes) == 0); \ - ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \ + ((sizes) & ~RUNTIME_INFO(dev_priv)->page_sizes) == 0; \ }) #define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay) @@ -1249,13 +860,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) #define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) -#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) +#define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0) #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) -#define HAS_DP20(dev_priv) (IS_DG2(dev_priv)) +#define HAS_DP20(dev_priv) (IS_DG2(dev_priv) || DISPLAY_VER(dev_priv) >= 14) + +#define HAS_DOUBLE_BUFFERED_M_N(dev_priv) (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) #define 
HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl) #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) @@ -1264,7 +877,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) -#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((RUNTIME_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1272,7 +885,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps) -#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) +#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc) #define HAS_HECI_PXP(dev_priv) \ (INTEL_INFO(dev_priv)->has_heci_pxp) @@ -1302,9 +915,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) -#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) +#define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) +#define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list) + /* * Platform has the dedicated compression control state for each lmem surfaces * stored in lmem to support the 3D and media compression formats. @@ -1313,7 +928,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) +#define HAS_POOLED_EU(dev_priv) (RUNTIME_INFO(dev_priv)->has_pooled_eu) #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) @@ -1335,9 +950,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask)) -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) +#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0) #define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) @@ -1355,85 +970,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ IS_ALDERLAKE_S(dev_priv)) -#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915)) +#define HAS_MBUS_JOINING(i915) (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) #define HAS_3D_PIPELINE(i915) (INTEL_INFO(i915)->has_3d_pipeline) #define HAS_ONE_EU_PER_FUSE_BIT(i915) (INTEL_INFO(i915)->has_one_eu_per_fuse_bit) -/* i915_gem.c */ -void i915_gem_init_early(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); - -static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) -{ - /* - * A single pass should suffice to release all the freed objects (along - * most call paths) , but be a little more paranoid in that freeing - * the objects does take a little amount of time, during which the rcu - * callbacks could have added new objects into the freed list, and - * armed the work again. 
- */ - while (atomic_read(&i915->mm.free_count)) { - flush_work(&i915->mm.free_work); - flush_delayed_work(&i915->bdev.wq); - rcu_barrier(); - } -} - -static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) -{ - /* - * Similar to objects above (see i915_gem_drain_freed-objects), in - * general we have workers that are armed by RCU and then rearm - * themselves in their callbacks. To be paranoid, we need to - * drain the workqueue a second time after waiting for the RCU - * grace period so that we catch work queued via RCU from the first - * pass. As neither drain_workqueue() nor flush_workqueue() report - * a result, we make an assumption that we only don't require more - * than 3 passes to catch all _recursive_ RCU delayed work. - * - */ - int pass = 3; - do { - flush_workqueue(i915->wq); - rcu_barrier(); - i915_gem_drain_freed_objects(i915); - } while (--pass); - drain_workqueue(i915->wq); -} - -struct i915_vma * __must_check -i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, - struct i915_gem_ww_ctx *ww, - const struct i915_gtt_view *view, - u64 size, u64 alignment, u64 flags); - -struct i915_vma * __must_check -i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, - const struct i915_gtt_view *view, - u64 size, u64 alignment, u64 flags); - -int i915_gem_object_unbind(struct drm_i915_gem_object *obj, - unsigned long flags); -#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) -#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) -#define I915_GEM_OBJECT_UNBIND_TEST BIT(2) -#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) -#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) - -void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); - -int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); - -int __must_check i915_gem_init(struct drm_i915_private *dev_priv); -void i915_gem_driver_register(struct drm_i915_private *i915); -void i915_gem_driver_unregister(struct drm_i915_private *i915); -void i915_gem_driver_remove(struct drm_i915_private *dev_priv); -void i915_gem_driver_release(struct drm_i915_private *dev_priv); - -int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); - /* intel_device_info.c */ static inline struct intel_device_info * mkwrite_device_info(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a3373699835d..f18cc6270b2b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -842,6 +842,10 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) &to_gt(i915)->ggtt->userfault_list, userfault_link) __i915_gem_object_release_mmap_gtt(obj); + list_for_each_entry_safe(obj, on, + &to_gt(i915)->lmem_userfault_list, userfault_link) + i915_gem_object_runtime_pm_release_mmap_offset(obj); + /* * The fence will be lost when the device powers down. If any were * in use by hardware (i.e. 
they are pinned), we should not be powering @@ -1035,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (i915_gem_object_has_pages(obj) && i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) { if (obj->mm.madv == I915_MADV_WILLNEED) { GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); i915_gem_object_clear_tiling_quirk(obj); @@ -1085,14 +1089,50 @@ out: return err; } +/* + * A single pass should suffice to release all the freed objects (along most + * call paths), but be a little more paranoid in that freeing the objects does + * take a small amount of time, during which the rcu callbacks could have added + * new objects into the freed list, and armed the work again. + */ +void i915_gem_drain_freed_objects(struct drm_i915_private *i915) +{ + while (atomic_read(&i915->mm.free_count)) { + flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->bdev.wq); + rcu_barrier(); + } +} + +/* + * Similar to objects above (see i915_gem_drain_freed_objects()), in general we + * have workers that are armed by RCU and then rearm themselves in their + * callbacks. To be paranoid, we need to drain the workqueue a second time after + * waiting for the RCU grace period so that we catch work queued via RCU from + * the first pass. As neither drain_workqueue() nor flush_workqueue() report a + * result, we assume that we don't require more than 3 passes + * to catch all _recursive_ RCU delayed work. + */ +void i915_gem_drain_workqueue(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < 3; i++) { + flush_workqueue(i915->wq); + rcu_barrier(); + i915_gem_drain_freed_objects(i915); + } + + drain_workqueue(i915->wq); + } + int i915_gem_init(struct drm_i915_private *dev_priv) { int ret; /* We need to fall back to 4K pages if host doesn't support huge gtt.
*/ if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) - mkwrite_device_info(dev_priv)->page_sizes = - I915_GTT_PAGE_SIZE_4K; + RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; ret = i915_gem_init_userptr(dev_priv); if (ret) @@ -1173,7 +1213,7 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { - intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref); + intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); i915_gem_suspend_late(dev_priv); intel_gt_driver_remove(to_gt(dev_priv)); @@ -1213,7 +1253,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv) i915_gem_init__mm(dev_priv); i915_gem_init__contexts(dev_priv); - spin_lock_init(&dev_priv->fb_tracking.lock); + spin_lock_init(&dev_priv->display.fb_tracking.lock); } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 68d8d52bd541..a5cdf6662d01 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -26,12 +26,55 @@ #define __I915_GEM_H__ #include <linux/bug.h> +#include <linux/types.h> #include <drm/drm_drv.h> #include "i915_utils.h" +struct drm_file; +struct drm_i915_gem_object; struct drm_i915_private; +struct i915_gem_ww_ctx; +struct i915_gtt_view; +struct i915_vma; + +void i915_gem_init_early(struct drm_i915_private *i915); +void i915_gem_cleanup_early(struct drm_i915_private *i915); + +void i915_gem_drain_freed_objects(struct drm_i915_private *i915); +void i915_gem_drain_workqueue(struct drm_i915_private *i915); + +struct i915_vma * __must_check +i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj, + struct i915_gem_ww_ctx *ww, + const struct i915_gtt_view *view, + u64 size, u64 alignment, u64 flags); + +struct i915_vma * __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_gtt_view *view, + u64 size, u64 alignment, u64 flags); + +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags); +#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) +#define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1) +#define I915_GEM_OBJECT_UNBIND_TEST BIT(2) +#define I915_GEM_OBJECT_UNBIND_VM_TRYLOCK BIT(3) +#define I915_GEM_OBJECT_UNBIND_ASYNC BIT(4) + +void i915_gem_runtime_suspend(struct drm_i915_private *i915); + +int __must_check i915_gem_init(struct drm_i915_private *i915); +void i915_gem_driver_register(struct drm_i915_private *i915); +void i915_gem_driver_unregister(struct drm_i915_private *i915); +void i915_gem_driver_remove(struct drm_i915_private *i915); +void i915_gem_driver_release(struct drm_i915_private *i915); + +int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); + +/* FIXME: All of the below belong somewhere else. 
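A hedged sketch of how the I915_GEM_OBJECT_UNBIND_* flags declared above combine at a call site; the call below is illustrative, not a call site from this diff:

	/* illustrative: evict even active VMAs, serialised behind a barrier */
	int err = i915_gem_object_unbind(obj,
					 I915_GEM_OBJECT_UNBIND_ACTIVE |
					 I915_GEM_OBJECT_UNBIND_BARRIER);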
*/ #ifdef CONFIG_DRM_I915_DEBUG_GEM diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 6fd15b39570c..342c8ca6414e 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -36,7 +36,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, value = to_gt(i915)->ggtt->num_fences; break; case I915_PARAM_HAS_OVERLAY: - value = !!i915->overlay; + value = !!i915->display.overlay; break; case I915_PARAM_HAS_BSD: value = !!intel_engine_lookup_user(i915, diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 16d8b7ba65dc..9ea2fe34e7d3 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -646,8 +646,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, { struct drm_printer p = i915_error_printer(m); - intel_device_info_print_static(&error->device_info, &p); - intel_device_info_print_runtime(&error->runtime_info, &p); + intel_device_info_print(&error->device_info, &error->runtime_info, &p); intel_driver_caps_print(&error->driver_caps, &p); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 783a6ca41a61..86a42d9e8041 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -185,7 +185,7 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = { static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) { - struct i915_hotplug *hpd = &dev_priv->hotplug; + struct intel_hotplug *hpd = &dev_priv->display.hotplug; if (HAS_GMCH(dev_priv)) { if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || @@ -595,7 +595,7 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, static bool i915_has_asle(struct drm_i915_private *dev_priv) { - if (!dev_priv->opregion.asle) + if (!dev_priv->display.opregion.asle) return false; return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); @@ -1104,9 +1104,9 @@ static void ivb_parity_work(struct work_struct *work) out: drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); - spin_lock_irq(&gt->irq_lock); + spin_lock_irq(gt->irq_lock); gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); - spin_unlock_irq(&gt->irq_lock); + spin_unlock_irq(gt->irq_lock); mutex_unlock(&dev_priv->drm.struct_mutex); } @@ -1272,7 +1272,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, u32 enabled_irqs = 0; for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; @@ -1304,12 +1304,12 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { - wake_up_all(&dev_priv->gmbus_wait_queue); + wake_up_all(&dev_priv->display.gmbus.wait_queue); } static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) { - wake_up_all(&dev_priv->gmbus_wait_queue); + wake_up_all(&dev_priv->display.gmbus.wait_queue); } #if defined(CONFIG_DEBUG_FS) @@ -1637,7 +1637,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, if (hotplug_trigger) { intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, i9xx_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -1841,7 +1841,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private
*dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, pch_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -1986,7 +1986,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, icp_ddi_port_hotplug_long_detect); } @@ -1998,7 +1998,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, icp_tc_port_hotplug_long_detect); } @@ -2024,7 +2024,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, spt_port_hotplug_long_detect); } @@ -2036,7 +2036,7 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, - dev_priv->hotplug.pch_hpd, + dev_priv->display.hotplug.pch_hpd, spt_port_hotplug2_long_detect); } @@ -2057,7 +2057,7 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, ilk_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -2237,7 +2237,7 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, bxt_port_hotplug_long_detect); intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); @@ -2257,7 +2257,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } @@ -2269,7 +2269,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, - dev_priv->hotplug.hpd, + dev_priv->display.hotplug.hpd, gen11_port_hotplug_long_detect); } @@ -2653,9 +2653,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } static u32 -gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) +gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) { - void __iomem * const regs = gt->uncore->regs; + void __iomem * const regs = i915->uncore.regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -2669,10 +2669,10 @@ gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) } static void -gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) +gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) { if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(gt->i915); + intel_opregion_asle_intr(i915); } static inline u32 gen11_master_intr_disable(void __iomem * const regs) @@ -2736,11 +2736,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (master_ctl & GEN11_DISPLAY_IRQ) gen11_display_irq_handler(i915); - gu_misc_iir = 
gen11_gu_misc_irq_ack(gt, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(gt, gu_misc_iir); + gen11_gu_misc_irq_handler(i915, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -2801,11 +2801,11 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) if (master_ctl & GEN11_DISPLAY_IRQ) gen11_display_irq_handler(i915); - gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); dg1_master_intr_enable(regs); - gen11_gu_misc_irq_handler(gt, gu_misc_iir); + gen11_gu_misc_irq_handler(i915, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -3313,8 +3313,8 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3383,8 +3383,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); @@ -3460,8 +3460,8 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 hotplug_irqs, enabled_irqs; u32 val; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; @@ -3538,8 +3538,8 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3578,8 +3578,8 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); if (DISPLAY_VER(dev_priv) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -3636,8 +3636,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; - enabled_irqs = 
intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -4370,8 +4370,8 @@ HPD_FUNCS(ilk); void intel_hpd_irq_setup(struct drm_i915_private *i915) { - if (i915->display_irqs_enabled && i915->hotplug_funcs) - i915->hotplug_funcs->hpd_irq_setup(i915); + if (i915->display_irqs_enabled && i915->display.funcs.hotplug) + i915->display.funcs.hotplug->hpd_irq_setup(i915); } /** @@ -4413,33 +4413,33 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display_irqs_enabled = false; - dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; + dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; /* If we have MST support, we want to avoid doing short HPD IRQ storm * detection, as short HPD storms will occur as a natural part of * sideband messaging with MST. * On older platforms however, IRQ storms can occur with both long and * short pulses, as seen on some G4x systems. */ - dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); + dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); if (HAS_GMCH(dev_priv)) { if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->hotplug_funcs = &i915_hpd_funcs; + dev_priv->display.funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(dev_priv)) - dev_priv->hotplug_funcs = &icp_hpd_funcs; + dev_priv->display.funcs.hotplug = &icp_hpd_funcs; else if (HAS_PCH_DG1(dev_priv)) - dev_priv->hotplug_funcs = &dg1_hpd_funcs; + dev_priv->display.funcs.hotplug = &dg1_hpd_funcs; else if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->hotplug_funcs = &gen11_hpd_funcs; + dev_priv->display.funcs.hotplug = &gen11_hpd_funcs; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->hotplug_funcs = &bxt_hpd_funcs; + dev_priv->display.funcs.hotplug = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - dev_priv->hotplug_funcs = &icp_hpd_funcs; + dev_priv->display.funcs.hotplug = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->hotplug_funcs = &spt_hpd_funcs; + dev_priv->display.funcs.hotplug = &spt_hpd_funcs; else - dev_priv->hotplug_funcs = &ilk_hpd_funcs; + dev_priv->display.funcs.hotplug = &ilk_hpd_funcs; } } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 59a579ed03bb..cd4487a1d3be 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -26,16 +26,22 @@ #include <drm/drm_drv.h> #include <drm/i915_pciids.h> +#include "gt/intel_gt_regs.h" +#include "gt/intel_sa_media.h" + #include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" #include "i915_reg.h" +#include "intel_pci_config.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ - .graphics.ver = (x), \ - .media.ver = (x), \ - .display.ver = (x) + .__runtime.graphics.ip.ver = (x), \ + .__runtime.media.ip.ver = (x), \ + .__runtime.display.ip.ver = (x) + +#define NO_DISPLAY .__runtime.pipe_mask = 0 #define I845_PIPE_OFFSETS \ .display.pipe_offsets = { \ @@ -159,16 +165,16 @@ /* Keep in gen based order, and chronological order within a gen */ #define GEN_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K #define GEN_DEFAULT_REGIONS \ - 
.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM + .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_SMEM #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -177,7 +183,7 @@ .has_3d_pipeline = 1, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -189,8 +195,8 @@ #define I845_FEATURES \ GEN(2), \ - .display.pipe_mask = BIT(PIPE_A), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \ + .__runtime.pipe_mask = BIT(PIPE_A), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -198,7 +204,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -221,22 +227,22 @@ static const struct intel_device_info i845g_info = { static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_FEATURES \ GEN(3), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -266,7 +272,7 @@ static const struct intel_device_info i915gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -291,7 +297,7 @@ static const struct intel_device_info i945gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -323,12 +329,12 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .platform_engine_mask = BIT(RCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ 
.has_coherent_ggtt = true, \ @@ -351,7 +357,7 @@ static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .display.has_overlay = 1, .display.supports_tv = 1, .hws_needs_physical = 1, @@ -361,7 +367,7 @@ static const struct intel_device_info i965gm_info = { static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -369,18 +375,18 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), .display.supports_tv = 1, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; #define GEN5_FEATURES \ GEN(5), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_3d_pipeline = 1, \ .has_snoop = true, \ .has_coherent_ggtt = true, \ @@ -403,16 +409,16 @@ static const struct intel_device_info ilk_m_info = { PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, .has_rps = true, - .display.fbc_mask = BIT(INTEL_FBC_A), + .__runtime.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN6_FEATURES \ GEN(6), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -420,8 +426,8 @@ static const struct intel_device_info ilk_m_info = { .has_rc6p = 1, \ .has_rps = true, \ .dma_mask_size = 40, \ - .ppgtt_type = INTEL_PPGTT_ALIASING, \ - .ppgtt_size = 31, \ + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ + .__runtime.ppgtt_size = 31, \ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ @@ -460,11 +466,11 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_3d_pipeline = 1, \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -473,8 +479,8 @@ static const struct intel_device_info snb_m_gt2_info = { .has_reset_engine = true, \ .has_rps = true, \ 
.dma_mask_size = 40, \ - .ppgtt_type = INTEL_PPGTT_ALIASING, \ - .ppgtt_size = 31, \ + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \ + .__runtime.ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ IVB_COLORS, \ @@ -515,9 +521,8 @@ static const struct intel_device_info ivb_m_gt2_info = { static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), + NO_DISPLAY, .gt = 2, - .display.pipe_mask = 0, /* legal, last one wins */ - .display.cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -525,8 +530,8 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -534,11 +539,11 @@ static const struct intel_device_info vlv_info = { .display.has_gmch = 1, .display.has_hotplug = 1, .dma_mask_size = 40, - .ppgtt_type = INTEL_PPGTT_ALIASING, - .ppgtt_size = 31, + .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, + .__runtime.ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display.mmio_offset = VLV_DISPLAY_BASE, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, @@ -549,8 +554,8 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ @@ -584,8 +589,8 @@ static const struct intel_device_info hsw_gt3_info = { GEN(8), \ .has_logical_ring_contexts = 1, \ .dma_mask_size = 39, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ - .ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \ + .__runtime.ppgtt_size = 48, \ .has_64bit_reloc = 1 #define BDW_PLATFORM \ @@ -613,18 +618,18 @@ static const struct intel_device_info bdw_rsvd_info = { static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, @@ -632,8 +637,8 @@ static const struct intel_device_info chv_info = { .has_logical_ring_contexts = 1, .display.has_gmch = 1, .dma_mask_size = 39, - .ppgtt_type = INTEL_PPGTT_FULL, - .ppgtt_size = 32, + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, + .__runtime.ppgtt_size = 32, 
.has_reset_engine = 1, .has_snoop = true, .has_coherent_ggtt = false, @@ -646,16 +651,16 @@ static const struct intel_device_info chv_info = { }; #define GEN9_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K #define GEN9_FEATURES \ GEN8_FEATURES, \ GEN(9), \ GEN9_DEFAULT_PAGE_SIZES, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .has_gt_uc = 1, \ - .display.has_hdcp = 1, \ + .__runtime.has_hdcp = 1, \ .display.has_ipc = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ @@ -678,7 +683,7 @@ static const struct intel_device_info skl_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .platform_engine_mask = \ + .__runtime.platform_engine_mask = \ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) @@ -697,29 +702,29 @@ static const struct intel_device_info skl_gt4_info = { .is_lp = 1, \ .display.dbuf.slice_mask = BIT(DBUF_S1), \ .display.has_hotplug = 1, \ - .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_3d_pipeline = 1, \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ - .display.has_hdcp = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.has_hdcp = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ .has_runtime_pm = 1, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .has_rc6 = 1, \ .has_rps = true, \ .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_gt_uc = 1, \ .dma_mask_size = 39, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ - .ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL, \ + .__runtime.ppgtt_size = 48, \ .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ @@ -739,7 +744,7 @@ static const struct intel_device_info bxt_info = { static const struct intel_device_info glk_info = { GEN9_LP_FEATURES, PLATFORM(INTEL_GEMINILAKE), - .display.ver = 10, + .__runtime.display.ip.ver = 10, .display.dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ GLK_COLORS, }; @@ -761,7 +766,7 @@ static const struct intel_device_info kbl_gt2_info = { static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -782,7 +787,7 @@ static const struct intel_device_info cfl_gt2_info = { static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -801,15 +806,15 @@ static const struct intel_device_info cml_gt2_info = { }; #define GEN11_DEFAULT_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K | \ - I915_GTT_PAGE_SIZE_2M + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K | \ + I915_GTT_PAGE_SIZE_2M #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ 
.display.abox_mask = BIT(0), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .display.pipe_offsets = { \ @@ -832,37 +837,37 @@ static const struct intel_device_info cml_gt2_info = { ICL_COLORS, \ .display.dbuf.size = 2048, \ .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ - .display.has_dsc = 1, \ + .__runtime.has_dsc = 1, \ .has_coherent_ggtt = false, \ .has_logical_ring_elsq = 1 static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), - .ppgtt_size = 36, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .__runtime.ppgtt_size = 36, }; static const struct intel_device_info jsl_info = { GEN11_FEATURES, PLATFORM(INTEL_JASPERLAKE), - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), - .ppgtt_size = 36, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .__runtime.ppgtt_size = 36, }; #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ .display.abox_mask = GENMASK(2, 1), \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .display.pipe_offsets = { \ @@ -890,7 +895,7 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .display.has_modular_fia = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -898,17 +903,17 @@ static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), .display.abox_mask = BIT(0), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; #define DGFX_FEATURES \ - .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ + .__runtime.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ .has_llc = 0, \ .has_pxp = 0, \ .has_snoop = 1, \ @@ -918,24 +923,24 @@ static const struct intel_device_info rkl_info = { static const struct intel_device_info dg1_info = { GEN12_FEATURES, DGFX_FEATURES, - .graphics.rel = 10, + .__runtime.graphics.ip.rel = 10, PLATFORM(INTEL_DG1), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) 
| BIT(VECS0) | BIT(VCS0) | BIT(VCS2), /* Wa_16011227922 */ - .ppgtt_size = 47, + .__runtime.ppgtt_size = 47, }; static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .dma_mask_size = 39, }; @@ -951,18 +956,18 @@ static const struct intel_device_info adl_s_info = { .display.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ .display.has_ddi = 1, \ - .display.has_dmc = 1, \ + .__runtime.has_dmc = 1, \ .display.has_dp_mst = 1, \ .display.has_dsb = 1, \ - .display.has_dsc = 1, \ - .display.fbc_mask = BIT(INTEL_FBC_A), \ + .__runtime.has_dsc = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_fpga_dbg = 1, \ - .display.has_hdcp = 1, \ + .__runtime.has_hdcp = 1, \ .display.has_hotplug = 1, \ .display.has_ipc = 1, \ .display.has_psr = 1, \ - .display.ver = 13, \ - .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .__runtime.display.ip.ver = 13, \ + .__runtime.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .display.pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -985,28 +990,28 @@ static const struct intel_device_info adl_p_info = { GEN12_FEATURES, XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .ppgtt_size = 48, + .__runtime.ppgtt_size = 48, .dma_mask_size = 39, }; #undef GEN #define XE_HP_PAGE_SIZES \ - .page_sizes = I915_GTT_PAGE_SIZE_4K | \ - I915_GTT_PAGE_SIZE_64K | \ - I915_GTT_PAGE_SIZE_2M + .__runtime.page_sizes = I915_GTT_PAGE_SIZE_4K | \ + I915_GTT_PAGE_SIZE_64K | \ + I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .graphics.ver = 12, \ - .graphics.rel = 50, \ + .__runtime.graphics.ip.ver = 12, \ + .__runtime.graphics.ip.rel = 50, \ XE_HP_PAGE_SIZES, \ .dma_mask_size = 46, \ .has_3d_pipeline = 1, \ @@ -1022,12 +1027,12 @@ static const struct intel_device_info adl_p_info = { .has_reset_engine = 1, \ .has_rps = 1, \ .has_runtime_pm = 1, \ - .ppgtt_size = 48, \ - .ppgtt_type = INTEL_PPGTT_FULL + .__runtime.ppgtt_size = 48, \ + .__runtime.ppgtt_type = INTEL_PPGTT_FULL #define XE_HPM_FEATURES \ - .media.ver = 12, \ - .media.rel = 50 + .__runtime.media.ip.ver = 12, \ + .__runtime.media.ip.rel = 50 __maybe_unused static const struct intel_device_info xehpsdv_info = { @@ -1035,11 +1040,11 @@ static const struct intel_device_info xehpsdv_info = { XE_HPM_FEATURES, DGFX_FEATURES, PLATFORM(INTEL_XEHPSDV), - .display = { }, + NO_DISPLAY, .has_64k_pages = 1, .needs_compact_pt = 1, .has_media_ratio_mode = 1, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) | @@ -1052,8 +1057,8 @@ static const struct intel_device_info 
xehpsdv_info = { XE_HP_FEATURES, \ XE_HPM_FEATURES, \ DGFX_FEATURES, \ - .graphics.rel = 55, \ - .media.rel = 55, \ + .__runtime.graphics.ip.rel = 55, \ + .__runtime.media.ip.rel = 55, \ PLATFORM(INTEL_DG2), \ .has_4tile = 1, \ .has_64k_pages = 1, \ @@ -1061,7 +1066,7 @@ static const struct intel_device_info xehpsdv_info = { .has_heci_pxp = 1, \ .needs_compact_pt = 1, \ .has_media_ratio_mode = 1, \ - .platform_engine_mask = \ + .__runtime.platform_engine_mask = \ BIT(RCS0) | BIT(BCS0) | \ BIT(VECS0) | BIT(VECS1) | \ BIT(VCS0) | BIT(VCS2) | \ @@ -1070,14 +1075,14 @@ static const struct intel_device_info xehpsdv_info = { static const struct intel_device_info dg2_info = { DG2_FEATURES, XE_LPD_FEATURES, - .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D), .require_force_probe = 1, }; static const struct intel_device_info ats_m_info = { DG2_FEATURES, - .display = { 0 }, + NO_DISPLAY, .require_force_probe = 1, .tuning_thread_rr_after_dep = 1, }; @@ -1096,12 +1101,12 @@ static const struct intel_device_info pvc_info = { XE_HPC_FEATURES, XE_HPM_FEATURES, DGFX_FEATURES, - .graphics.rel = 60, - .media.rel = 60, + .__runtime.graphics.ip.rel = 60, + .__runtime.media.ip.rel = 60, PLATFORM(INTEL_PONTEVECCHIO), - .display = { 0 }, + NO_DISPLAY, .has_flat_ccs = 0, - .platform_engine_mask = + .__runtime.platform_engine_mask = BIT(BCS0) | BIT(VCS0) | BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), @@ -1110,8 +1115,19 @@ static const struct intel_device_info pvc_info = { #define XE_LPDP_FEATURES \ XE_LPD_FEATURES, \ - .display.ver = 14, \ - .display.has_cdclk_crawl = 1 + .__runtime.display.ip.ver = 14, \ + .display.has_cdclk_crawl = 1, \ + .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) + +static const struct intel_gt_definition xelpmp_extra_gt[] = { + { + .type = GT_MEDIA, + .name = "Standalone Media GT", + .gsi_offset = MTL_MEDIA_GSI_BASE, + .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + }, + {} +}; __maybe_unused static const struct intel_device_info mtl_info = { @@ -1121,15 +1137,16 @@ static const struct intel_device_info mtl_info = { * Real graphics IP version will be obtained from hardware GMD_ID * register. Value provided here is just for sanity checking. */ - .graphics.ver = 12, - .graphics.rel = 70, - .media.ver = 13, + .__runtime.graphics.ip.ver = 12, + .__runtime.graphics.ip.rel = 70, + .__runtime.media.ip.ver = 13, PLATFORM(INTEL_METEORLAKE), .display.has_modular_fia = 1, + .extra_gt_list = xelpmp_extra_gt, .has_flat_ccs = 0, .has_snoop = 1, - .memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, - .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), + .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM, + .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(CCS0), .require_force_probe = 1, }; @@ -1263,6 +1280,27 @@ static bool force_probe(u16 device_id, const char *devices) return ret; } +bool i915_pci_resource_valid(struct pci_dev *pdev, int bar) +{ + if (!pci_resource_flags(pdev, bar)) + return false; + + if (pci_resource_flags(pdev, bar) & IORESOURCE_UNSET) + return false; + + if (!pci_resource_len(pdev, bar)) + return false; + + return true; +} + +static bool intel_mmio_bar_valid(struct pci_dev *pdev, struct intel_device_info *intel_info) +{ + int gttmmaddr_bar = intel_info->__runtime.graphics.ip.ver == 2 ? 
GEN2_GTTMMADR_BAR : GTTMMADR_BAR; + + return i915_pci_resource_valid(pdev, gttmmaddr_bar); +} + static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct intel_device_info *intel_info = @@ -1288,6 +1326,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; + if (!intel_mmio_bar_valid(pdev, intel_info)) + return -ENXIO; + /* Detect if we need to wait for other drivers early on */ if (intel_modeset_probe_defer(pdev)) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h index ee048c238174..8dfe19f9a775 100644 --- a/drivers/gpu/drm/i915/i915_pci.h +++ b/drivers/gpu/drm/i915/i915_pci.h @@ -6,7 +6,13 @@ #ifndef __I915_PCI_H__ #define __I915_PCI_H__ +#include <linux/types.h> + +struct pci_dev; + int i915_pci_register_driver(void); void i915_pci_unregister_driver(void); +bool i915_pci_resource_valid(struct pci_dev *pdev, int bar); + #endif /* __I915_PCI_H__ */ diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f3c23fe9ad9c..0defbb43ceea 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1376,7 +1376,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) { struct i915_perf *perf = stream->perf; - BUG_ON(stream != perf->exclusive_stream); + if (WARN_ON(stream != perf->exclusive_stream)) + return; /* * Unset exclusive_stream first, it will be checked while disabling diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3168d7007e10..1a9bd829fc7e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1125,8 +1125,12 @@ #define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */ #define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14) #define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x) +#define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2) +#define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3) #define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8) #define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x) +#define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5) +#define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x) #define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0) #define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x) @@ -1462,69 +1466,6 @@ #define FBC_REND_CACHE_CLEAN REG_BIT(1) /* - * GPIO regs - */ -#define GPIO(gpio) _MMIO(dev_priv->gpio_mmio_base + 0x5010 + \ - 4 * (gpio)) - -# define GPIO_CLOCK_DIR_MASK (1 << 0) -# define GPIO_CLOCK_DIR_IN (0 << 1) -# define GPIO_CLOCK_DIR_OUT (1 << 1) -# define GPIO_CLOCK_VAL_MASK (1 << 2) -# define GPIO_CLOCK_VAL_OUT (1 << 3) -# define GPIO_CLOCK_VAL_IN (1 << 4) -# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) -# define GPIO_DATA_DIR_MASK (1 << 8) -# define GPIO_DATA_DIR_IN (0 << 9) -# define GPIO_DATA_DIR_OUT (1 << 9) -# define GPIO_DATA_VAL_MASK (1 << 10) -# define GPIO_DATA_VAL_OUT (1 << 11) -# define GPIO_DATA_VAL_IN (1 << 12) -# define GPIO_DATA_PULLUP_DISABLE (1 << 13) - -#define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ -#define GMBUS_AKSV_SELECT (1 << 11) -#define GMBUS_RATE_100KHZ (0 << 8) -#define GMBUS_RATE_50KHZ (1 << 8) -#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ -#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ -#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd 
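The new MTL credit encodings above are plain REG_FIELD_PREP() values. A short worked example using only the MBUS_DBOX masks from this hunk; the composed credit values are illustrative, not taken from this diff:

	/* REG_FIELD_PREP(REG_GENMASK(15, 14), 0x2) == 0x2 << 14 == 0x8000 */
	u32 credits = MBUS_DBOX_BW_4CREDITS_MTL |	/* BW field (15:14) = 2 */
		      MBUS_DBOX_I_CREDIT(2) |		/* I field (7:5) = 2 */
		      MBUS_DBOX_A_CREDIT(8);		/* A field (3:0) = 8 */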
on Pineview */ -#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) - -#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ -#define GMBUS_SW_CLR_INT (1 << 31) -#define GMBUS_SW_RDY (1 << 30) -#define GMBUS_ENT (1 << 29) /* enable timeout */ -#define GMBUS_CYCLE_NONE (0 << 25) -#define GMBUS_CYCLE_WAIT (1 << 25) -#define GMBUS_CYCLE_INDEX (2 << 25) -#define GMBUS_CYCLE_STOP (4 << 25) -#define GMBUS_BYTE_COUNT_SHIFT 16 -#define GMBUS_BYTE_COUNT_MAX 256U -#define GEN9_GMBUS_BYTE_COUNT_MAX 511U -#define GMBUS_SLAVE_INDEX_SHIFT 8 -#define GMBUS_SLAVE_ADDR_SHIFT 1 -#define GMBUS_SLAVE_READ (1 << 0) -#define GMBUS_SLAVE_WRITE (0 << 0) -#define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */ -#define GMBUS_INUSE (1 << 15) -#define GMBUS_HW_WAIT_PHASE (1 << 14) -#define GMBUS_STALL_TIMEOUT (1 << 13) -#define GMBUS_INT (1 << 12) -#define GMBUS_HW_RDY (1 << 11) -#define GMBUS_SATOER (1 << 10) -#define GMBUS_ACTIVE (1 << 9) -#define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ -#define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ -#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) -#define GMBUS_NAK_EN (1 << 3) -#define GMBUS_IDLE_EN (1 << 2) -#define GMBUS_HW_WAIT_EN (1 << 1) -#define GMBUS_HW_RDY_EN (1 << 0) -#define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */ -#define GMBUS_2BYTE_INDEX_EN (1 << 31) - -/* * Clock control & power management */ #define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014) @@ -1700,7 +1641,7 @@ #define DSTATE_PLL_D3_OFF (1 << 3) #define DSTATE_GFX_CLOCK_GATING (1 << 1) #define DSTATE_DOT_CLOCK_GATING (1 << 0) -#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200) +#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -1857,14 +1798,14 @@ #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8) #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 -#define PROCHOT_MASK REG_BIT(1) -#define THERMAL_LIMIT_MASK REG_BIT(2) -#define RATL_MASK REG_BIT(6) -#define VR_THERMALERT_MASK REG_BIT(7) -#define VR_TDC_MASK REG_BIT(8) -#define POWER_LIMIT_4_MASK REG_BIT(9) -#define POWER_LIMIT_1_MASK REG_BIT(11) -#define POWER_LIMIT_2_MASK REG_BIT(12) +#define PROCHOT_MASK REG_BIT(0) +#define THERMAL_LIMIT_MASK REG_BIT(1) +#define RATL_MASK REG_BIT(5) +#define VR_THERMALERT_MASK REG_BIT(6) +#define VR_TDC_MASK REG_BIT(7) +#define POWER_LIMIT_4_MASK REG_BIT(8) +#define POWER_LIMIT_1_MASK REG_BIT(10) +#define POWER_LIMIT_2_MASK REG_BIT(11) #define CHV_CLK_CTL1 _MMIO(0x101100) #define VLV_CLK_CTL2 _MMIO(0x101104) @@ -1916,6 +1857,13 @@ #define CLKGATE_DIS_PSL(pipe) \ _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_A, _CLKGATE_DIS_PSL_B) +#define _CLKGATE_DIS_PSL_EXT_A 0x4654C +#define _CLKGATE_DIS_PSL_EXT_B 0x46550 +#define PIPEDMC_GATING_DIS REG_BIT(12) + +#define CLKGATE_DIS_PSL_EXT(pipe) \ + _MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B) + /* * Display engine regs */ @@ -2822,7 +2770,7 @@ #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) #define PCH_PPS_BASE 0xC7200 -#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \ +#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \ PPS_BASE + (reg) + \ (pps_idx) * 0x100) @@ -2918,118 +2866,6 @@ #define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238) -#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250) 
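For orientation, a minimal sketch of how the GMBUS1 fields above combine into one command word; the 7-bit address 0x50 and the 2-byte transfer length are illustrative:

	u32 cmd = GMBUS_SW_RDY | GMBUS_CYCLE_INDEX | GMBUS_CYCLE_WAIT |
		  (2 << GMBUS_BYTE_COUNT_SHIFT) |	/* transfer length */
		  (0x50 << GMBUS_SLAVE_ADDR_SHIFT) |	/* 7-bit address */
		  GMBUS_SLAVE_READ;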
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350) -#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \ - _VLV_BLC_PWM_CTL2_B) - -#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254) -#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354) -#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \ - _VLV_BLC_PWM_CTL_B) - -#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260) -#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360) -#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \ - _VLV_BLC_HIST_CTL_B) - -/* Backlight control */ -#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */ -#define BLM_PWM_ENABLE (1 << 31) -#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ -#define BLM_PIPE_SELECT (1 << 29) -#define BLM_PIPE_SELECT_IVB (3 << 29) -#define BLM_PIPE_A (0 << 29) -#define BLM_PIPE_B (1 << 29) -#define BLM_PIPE_C (2 << 29) /* ivb + */ -#define BLM_TRANSCODER_A BLM_PIPE_A /* hsw */ -#define BLM_TRANSCODER_B BLM_PIPE_B -#define BLM_TRANSCODER_C BLM_PIPE_C -#define BLM_TRANSCODER_EDP (3 << 29) -#define BLM_PIPE(pipe) ((pipe) << 29) -#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */ -#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26) -#define BLM_PHASE_IN_ENABLE (1 << 25) -#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24) -#define BLM_PHASE_IN_TIME_BASE_SHIFT (16) -#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16) -#define BLM_PHASE_IN_COUNT_SHIFT (8) -#define BLM_PHASE_IN_COUNT_MASK (0xff << 8) -#define BLM_PHASE_IN_INCR_SHIFT (0) -#define BLM_PHASE_IN_INCR_MASK (0xff << 0) -#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254) -/* - * This is the most significant 15 bits of the number of backlight cycles in a - * complete cycle of the modulated backlight control. - * - * The actual value is this field multiplied by two. - */ -#define BACKLIGHT_MODULATION_FREQ_SHIFT (17) -#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) -#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */ -/* - * This is the number of cycles out of the backlight modulation cycle for which - * the backlight is on. - * - * This field must be no greater than the number of cycles in the complete - * backlight modulation cycle. - */ -#define BACKLIGHT_DUTY_CYCLE_SHIFT (0) -#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) -#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) -#define BLM_POLARITY_PNV (1 << 0) /* pnv only */ - -#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260) -#define BLM_HISTOGRAM_ENABLE (1 << 31) - -/* New registers for PCH-split platforms. Safe where new bits show up, the - * register layout matches with gen4 BLC_PWM_CTL[12]. */ -#define BLC_PWM_CPU_CTL2 _MMIO(0x48250) -#define BLC_PWM_CPU_CTL _MMIO(0x48254) - -#define HSW_BLC_PWM2_CTL _MMIO(0x48350) - -/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is - * like the normal CTL from gen4 and earlier. Hooray for confusing naming. 
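Reading the two field comments above together: the hardware period is twice the FREQ field, so, assuming the duty field counts raw cycles, a 50% duty cycle stores the FREQ value itself. A hedged sketch with an illustrative cycle count:

	u32 freq = 0x1000;	/* illustrative: period = 2 * freq cycles */
	u32 ctl = (freq << BACKLIGHT_MODULATION_FREQ_SHIFT) |
		  (freq << BACKLIGHT_DUTY_CYCLE_SHIFT);	/* 50% on-time */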
*/ -#define BLC_PWM_PCH_CTL1 _MMIO(0xc8250) -#define BLM_PCH_PWM_ENABLE (1 << 31) -#define BLM_PCH_OVERRIDE_ENABLE (1 << 30) -#define BLM_PCH_POLARITY (1 << 29) -#define BLC_PWM_PCH_CTL2 _MMIO(0xc8254) - -#define UTIL_PIN_CTL _MMIO(0x48400) -#define UTIL_PIN_ENABLE (1 << 31) -#define UTIL_PIN_PIPE_MASK (3 << 29) -#define UTIL_PIN_PIPE(x) ((x) << 29) -#define UTIL_PIN_MODE_MASK (0xf << 24) -#define UTIL_PIN_MODE_DATA (0 << 24) -#define UTIL_PIN_MODE_PWM (1 << 24) -#define UTIL_PIN_MODE_VBLANK (4 << 24) -#define UTIL_PIN_MODE_VSYNC (5 << 24) -#define UTIL_PIN_MODE_EYE_LEVEL (8 << 24) -#define UTIL_PIN_OUTPUT_DATA (1 << 23) -#define UTIL_PIN_POLARITY (1 << 22) -#define UTIL_PIN_DIRECTION_INPUT (1 << 19) -#define UTIL_PIN_INPUT_DATA (1 << 16) - -/* BXT backlight register definition. */ -#define _BXT_BLC_PWM_CTL1 0xC8250 -#define BXT_BLC_PWM_ENABLE (1 << 31) -#define BXT_BLC_PWM_POLARITY (1 << 29) -#define _BXT_BLC_PWM_FREQ1 0xC8254 -#define _BXT_BLC_PWM_DUTY1 0xC8258 - -#define _BXT_BLC_PWM_CTL2 0xC8350 -#define _BXT_BLC_PWM_FREQ2 0xC8354 -#define _BXT_BLC_PWM_DUTY2 0xC8358 - -#define BXT_BLC_PWM_CTL(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) -#define BXT_BLC_PWM_FREQ(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) -#define BXT_BLC_PWM_DUTY(controller) _MMIO_PIPE(controller, \ - _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) - #define PCH_GTC_CTL _MMIO(0xe7000) #define PCH_GTC_ENABLE (1 << 31) @@ -3619,6 +3455,34 @@ #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ +#define _XELPDP_USBC1_AUX_CH_CTL 0x16F210 +#define _XELPDP_USBC2_AUX_CH_CTL 0x16F410 +#define _XELPDP_USBC3_AUX_CH_CTL 0x16F610 +#define _XELPDP_USBC4_AUX_CH_CTL 0x16F810 + +#define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \ + _DPA_AUX_CH_CTL, \ + _DPB_AUX_CH_CTL, \ + 0, /* port/aux_ch C is non-existent */ \ + _XELPDP_USBC1_AUX_CH_CTL, \ + _XELPDP_USBC2_AUX_CH_CTL, \ + _XELPDP_USBC3_AUX_CH_CTL, \ + _XELPDP_USBC4_AUX_CH_CTL)) + +#define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214 +#define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414 +#define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614 +#define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814 + +#define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \ + _DPA_AUX_CH_DATA1, \ + _DPB_AUX_CH_DATA1, \ + 0, /* port/aux_ch C is non-existent */ \ + _XELPDP_USBC1_AUX_CH_DATA1, \ + _XELPDP_USBC2_AUX_CH_DATA1, \ + _XELPDP_USBC3_AUX_CH_DATA1, \ + _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4) + #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) #define DP_AUX_CH_CTL_DONE (1 << 30) #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) @@ -3631,6 +3495,8 @@ #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 +#define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) +#define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) @@ -5862,6 +5728,13 @@ [TRANSCODER_B] = _CHICKEN_TRANS_B, \ [TRANSCODER_C] = _CHICKEN_TRANS_C, \ [TRANSCODER_D] = _CHICKEN_TRANS_D)) + +#define _MTL_CHICKEN_TRANS_A 0x604e0 +#define _MTL_CHICKEN_TRANS_B 0x614e0 +#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \ + _MTL_CHICKEN_TRANS_A, \ + _MTL_CHICKEN_TRANS_B) + #define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) #define 
HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x) #define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */ @@ -5926,7 +5799,8 @@ _BW_BUDDY1_PAGE_MASK)) #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) -#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) +#define MTL_RESET_PICA_HANDSHAKE_EN REG_BIT(6) +#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) #define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) @@ -6718,10 +6592,10 @@ #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18 #define GEN9_PCODE_READ_MEM_LATENCY 0x6 -#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF -#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8 -#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16 -#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24 +#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24) +#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16) +#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8) +#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0) #define SKL_PCODE_LOAD_HDCP_KEYS 0x5 #define SKL_PCODE_CDCLK_CONTROL 0x7 #define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3 @@ -6937,265 +6811,6 @@ enum skl_power_gate { #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) -/* HDCP Key Registers */ -#define HDCP_KEY_CONF _MMIO(0x66c00) -#define HDCP_AKSV_SEND_TRIGGER BIT(31) -#define HDCP_CLEAR_KEYS_TRIGGER BIT(30) -#define HDCP_KEY_LOAD_TRIGGER BIT(8) -#define HDCP_KEY_STATUS _MMIO(0x66c04) -#define HDCP_FUSE_IN_PROGRESS BIT(7) -#define HDCP_FUSE_ERROR BIT(6) -#define HDCP_FUSE_DONE BIT(5) -#define HDCP_KEY_LOAD_STATUS BIT(1) -#define HDCP_KEY_LOAD_DONE BIT(0) -#define HDCP_AKSV_LO _MMIO(0x66c10) -#define HDCP_AKSV_HI _MMIO(0x66c14) - -/* HDCP Repeater Registers */ -#define HDCP_REP_CTL _MMIO(0x66d00) -#define HDCP_TRANSA_REP_PRESENT BIT(31) -#define HDCP_TRANSB_REP_PRESENT BIT(30) -#define HDCP_TRANSC_REP_PRESENT BIT(29) -#define HDCP_TRANSD_REP_PRESENT BIT(28) -#define HDCP_DDIB_REP_PRESENT BIT(30) -#define HDCP_DDIA_REP_PRESENT BIT(29) -#define HDCP_DDIC_REP_PRESENT BIT(28) -#define HDCP_DDID_REP_PRESENT BIT(27) -#define HDCP_DDIF_REP_PRESENT BIT(26) -#define HDCP_DDIE_REP_PRESENT BIT(25) -#define HDCP_TRANSA_SHA1_M0 (1 << 20) -#define HDCP_TRANSB_SHA1_M0 (2 << 20) -#define HDCP_TRANSC_SHA1_M0 (3 << 20) -#define HDCP_TRANSD_SHA1_M0 (4 << 20) -#define HDCP_DDIB_SHA1_M0 (1 << 20) -#define HDCP_DDIA_SHA1_M0 (2 << 20) -#define HDCP_DDIC_SHA1_M0 (3 << 20) -#define HDCP_DDID_SHA1_M0 (4 << 20) -#define HDCP_DDIF_SHA1_M0 (5 << 20) -#define HDCP_DDIE_SHA1_M0 (6 << 20) /* Bspec says 5? 
*/ -#define HDCP_SHA1_BUSY BIT(16) -#define HDCP_SHA1_READY BIT(17) -#define HDCP_SHA1_COMPLETE BIT(18) -#define HDCP_SHA1_V_MATCH BIT(19) -#define HDCP_SHA1_TEXT_32 (1 << 1) -#define HDCP_SHA1_COMPLETE_HASH (2 << 1) -#define HDCP_SHA1_TEXT_24 (4 << 1) -#define HDCP_SHA1_TEXT_16 (5 << 1) -#define HDCP_SHA1_TEXT_8 (6 << 1) -#define HDCP_SHA1_TEXT_0 (7 << 1) -#define HDCP_SHA_V_PRIME_H0 _MMIO(0x66d04) -#define HDCP_SHA_V_PRIME_H1 _MMIO(0x66d08) -#define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) -#define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) -#define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) -#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) -#define HDCP_SHA_TEXT _MMIO(0x66d18) - -/* HDCP Auth Registers */ -#define _PORTA_HDCP_AUTHENC 0x66800 -#define _PORTB_HDCP_AUTHENC 0x66500 -#define _PORTC_HDCP_AUTHENC 0x66600 -#define _PORTD_HDCP_AUTHENC 0x66700 -#define _PORTE_HDCP_AUTHENC 0x66A00 -#define _PORTF_HDCP_AUTHENC 0x66900 -#define _PORT_HDCP_AUTHENC(port, x) _MMIO(_PICK(port, \ - _PORTA_HDCP_AUTHENC, \ - _PORTB_HDCP_AUTHENC, \ - _PORTC_HDCP_AUTHENC, \ - _PORTD_HDCP_AUTHENC, \ - _PORTE_HDCP_AUTHENC, \ - _PORTF_HDCP_AUTHENC) + (x)) -#define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) -#define _TRANSA_HDCP_CONF 0x66400 -#define _TRANSB_HDCP_CONF 0x66500 -#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ - _TRANSB_HDCP_CONF) -#define HDCP_CONF(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_CONF(trans) : \ - PORT_HDCP_CONF(port)) - -#define HDCP_CONF_CAPTURE_AN BIT(0) -#define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) -#define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) -#define _TRANSA_HDCP_ANINIT 0x66404 -#define _TRANSB_HDCP_ANINIT 0x66504 -#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_ANINIT, \ - _TRANSB_HDCP_ANINIT) -#define HDCP_ANINIT(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANINIT(trans) : \ - PORT_HDCP_ANINIT(port)) - -#define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) -#define _TRANSA_HDCP_ANLO 0x66408 -#define _TRANSB_HDCP_ANLO 0x66508 -#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ - _TRANSB_HDCP_ANLO) -#define HDCP_ANLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANLO(trans) : \ - PORT_HDCP_ANLO(port)) - -#define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) -#define _TRANSA_HDCP_ANHI 0x6640C -#define _TRANSB_HDCP_ANHI 0x6650C -#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ - _TRANSB_HDCP_ANHI) -#define HDCP_ANHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_ANHI(trans) : \ - PORT_HDCP_ANHI(port)) - -#define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) -#define _TRANSA_HDCP_BKSVLO 0x66410 -#define _TRANSB_HDCP_BKSVLO 0x66510 -#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_BKSVLO, \ - _TRANSB_HDCP_BKSVLO) -#define HDCP_BKSVLO(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_BKSVLO(trans) : \ - PORT_HDCP_BKSVLO(port)) - -#define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) -#define _TRANSA_HDCP_BKSVHI 0x66414 -#define _TRANSB_HDCP_BKSVHI 0x66514 -#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_BKSVHI, \ - _TRANSB_HDCP_BKSVHI) -#define HDCP_BKSVHI(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? 
\ - TRANS_HDCP_BKSVHI(trans) : \ - PORT_HDCP_BKSVHI(port)) - -#define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) -#define _TRANSA_HDCP_RPRIME 0x66418 -#define _TRANSB_HDCP_RPRIME 0x66518 -#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_RPRIME, \ - _TRANSB_HDCP_RPRIME) -#define HDCP_RPRIME(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_RPRIME(trans) : \ - PORT_HDCP_RPRIME(port)) - -#define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) -#define _TRANSA_HDCP_STATUS 0x6641C -#define _TRANSB_HDCP_STATUS 0x6651C -#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP_STATUS, \ - _TRANSB_HDCP_STATUS) -#define HDCP_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP_STATUS(trans) : \ - PORT_HDCP_STATUS(port)) - -#define HDCP_STATUS_STREAM_A_ENC BIT(31) -#define HDCP_STATUS_STREAM_B_ENC BIT(30) -#define HDCP_STATUS_STREAM_C_ENC BIT(29) -#define HDCP_STATUS_STREAM_D_ENC BIT(28) -#define HDCP_STATUS_AUTH BIT(21) -#define HDCP_STATUS_ENC BIT(20) -#define HDCP_STATUS_RI_MATCH BIT(19) -#define HDCP_STATUS_R0_READY BIT(18) -#define HDCP_STATUS_AN_READY BIT(17) -#define HDCP_STATUS_CIPHER BIT(16) -#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) - -/* HDCP2.2 Registers */ -#define _PORTA_HDCP2_BASE 0x66800 -#define _PORTB_HDCP2_BASE 0x66500 -#define _PORTC_HDCP2_BASE 0x66600 -#define _PORTD_HDCP2_BASE 0x66700 -#define _PORTE_HDCP2_BASE 0x66A00 -#define _PORTF_HDCP2_BASE 0x66900 -#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ - _PORTA_HDCP2_BASE, \ - _PORTB_HDCP2_BASE, \ - _PORTC_HDCP2_BASE, \ - _PORTD_HDCP2_BASE, \ - _PORTE_HDCP2_BASE, \ - _PORTF_HDCP2_BASE) + (x)) - -#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) -#define _TRANSA_HDCP2_AUTH 0x66498 -#define _TRANSB_HDCP2_AUTH 0x66598 -#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ - _TRANSB_HDCP2_AUTH) -#define AUTH_LINK_AUTHENTICATED BIT(31) -#define AUTH_LINK_TYPE BIT(30) -#define AUTH_FORCE_CLR_INPUTCTR BIT(19) -#define AUTH_CLR_KEYS BIT(18) -#define HDCP2_AUTH(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_AUTH(trans) : \ - PORT_HDCP2_AUTH(port)) - -#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) -#define _TRANSA_HDCP2_CTL 0x664B0 -#define _TRANSB_HDCP2_CTL 0x665B0 -#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ - _TRANSB_HDCP2_CTL) -#define CTL_LINK_ENCRYPTION_REQ BIT(31) -#define HDCP2_CTL(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_CTL(trans) : \ - PORT_HDCP2_CTL(port)) - -#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) -#define _TRANSA_HDCP2_STATUS 0x664B4 -#define _TRANSB_HDCP2_STATUS 0x665B4 -#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_STATUS, \ - _TRANSB_HDCP2_STATUS) -#define LINK_TYPE_STATUS BIT(22) -#define LINK_AUTH_STATUS BIT(21) -#define LINK_ENCRYPTION_STATUS BIT(20) -#define HDCP2_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? 
\ - TRANS_HDCP2_STATUS(trans) : \ - PORT_HDCP2_STATUS(port)) - -#define _PIPEA_HDCP2_STREAM_STATUS 0x668C0 -#define _PIPEB_HDCP2_STREAM_STATUS 0x665C0 -#define _PIPEC_HDCP2_STREAM_STATUS 0x666C0 -#define _PIPED_HDCP2_STREAM_STATUS 0x667C0 -#define PIPE_HDCP2_STREAM_STATUS(pipe) _MMIO(_PICK((pipe), \ - _PIPEA_HDCP2_STREAM_STATUS, \ - _PIPEB_HDCP2_STREAM_STATUS, \ - _PIPEC_HDCP2_STREAM_STATUS, \ - _PIPED_HDCP2_STREAM_STATUS)) - -#define _TRANSA_HDCP2_STREAM_STATUS 0x664C0 -#define _TRANSB_HDCP2_STREAM_STATUS 0x665C0 -#define TRANS_HDCP2_STREAM_STATUS(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_STREAM_STATUS, \ - _TRANSB_HDCP2_STREAM_STATUS) -#define STREAM_ENCRYPTION_STATUS BIT(31) -#define STREAM_TYPE_STATUS BIT(30) -#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_STREAM_STATUS(trans) : \ - PIPE_HDCP2_STREAM_STATUS(pipe)) - -#define _PORTA_HDCP2_AUTH_STREAM 0x66F00 -#define _PORTB_HDCP2_AUTH_STREAM 0x66F04 -#define PORT_HDCP2_AUTH_STREAM(port) _MMIO_PORT(port, \ - _PORTA_HDCP2_AUTH_STREAM, \ - _PORTB_HDCP2_AUTH_STREAM) -#define _TRANSA_HDCP2_AUTH_STREAM 0x66F00 -#define _TRANSB_HDCP2_AUTH_STREAM 0x66F04 -#define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \ - _TRANSA_HDCP2_AUTH_STREAM, \ - _TRANSB_HDCP2_AUTH_STREAM) -#define AUTH_STREAM_TYPE BIT(31) -#define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ - (GRAPHICS_VER(dev_priv) >= 12 ? \ - TRANS_HDCP2_AUTH_STREAM(trans) : \ - PORT_HDCP2_AUTH_STREAM(port)) - /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 @@ -7503,16 +7118,16 @@ enum skl_power_gate { /* CDCLK_CTL */ #define CDCLK_CTL _MMIO(0x46000) -#define CDCLK_FREQ_SEL_MASK (3 << 26) -#define CDCLK_FREQ_450_432 (0 << 26) -#define CDCLK_FREQ_540 (1 << 26) -#define CDCLK_FREQ_337_308 (2 << 26) -#define CDCLK_FREQ_675_617 (3 << 26) -#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22) -#define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22) +#define CDCLK_FREQ_SEL_MASK REG_GENMASK(27, 26) +#define CDCLK_FREQ_450_432 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 0) +#define CDCLK_FREQ_540 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 1) +#define CDCLK_FREQ_337_308 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 2) +#define CDCLK_FREQ_675_617 REG_FIELD_PREP(CDCLK_FREQ_SEL_MASK, 3) +#define BXT_CDCLK_CD2X_DIV_SEL_MASK REG_GENMASK(23, 22) +#define BXT_CDCLK_CD2X_DIV_SEL_1 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 0) +#define BXT_CDCLK_CD2X_DIV_SEL_1_5 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 1) +#define BXT_CDCLK_CD2X_DIV_SEL_2 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 2) +#define BXT_CDCLK_CD2X_DIV_SEL_4 REG_FIELD_PREP(BXT_CDCLK_CD2X_DIV_SEL_MASK, 3) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20) #define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) @@ -8367,6 +7982,7 @@ enum skl_power_gate { #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_ALT_ICH_SEL (1 << 20) #define DSC_VBR_ENABLE (1 << 19) #define DSC_422_ENABLE (1 << 18) #define DSC_COLOR_SPACE_CONVERSION (1 << 17) @@ -8717,4 +8333,27 @@ enum skl_power_gate { #define GEN12_CULLBIT2 _MMIO(0x7030) #define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC) +#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780) +#define MTL_LATENCY_LP2_LP3 _MMIO(0x45784) +#define MTL_LATENCY_LP4_LP5 _MMIO(0x45788) 
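The three MTL_LATENCY_LP* registers just added each pack two memory latency levels into one 32-bit word; the MTL_LATENCY_LEVEL_EVEN_MASK/MTL_LATENCY_LEVEL_ODD_MASK fields defined next select the two 13-bit halves. A minimal sketch of how such a pair decodes with the REG_FIELD_GET() helper that this patch converts the other definitions to (the function name and wm[] layout are illustrative assumptions, not code from this diff):

	/*
	 * Sketch only: read the six MTL watermark latency levels out of the
	 * three packed registers above, two 13-bit fields per register.
	 * mtl_read_wm_latency() is a hypothetical name for illustration.
	 */
	static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
	{
		u32 val;

		val = intel_uncore_read(&i915->uncore, MTL_LATENCY_LP0_LP1);
		wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
		wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

		val = intel_uncore_read(&i915->uncore, MTL_LATENCY_LP2_LP3);
		wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
		wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

		val = intel_uncore_read(&i915->uncore, MTL_LATENCY_LP4_LP5);
		wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
		wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
	}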
+#define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0) +#define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16) + +#define MTL_LATENCY_SAGV _MMIO(0x4578b) +#define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0) + +#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) +#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8) +#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4) +#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0) + +#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2) +#define MTL_TRCD_MASK REG_GENMASK(31, 24) +#define MTL_TRP_MASK REG_GENMASK(23, 16) +#define MTL_DCLK_MASK REG_GENMASK(15, 0) + +#define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2) +#define MTL_TRAS_MASK REG_GENMASK(16, 8) +#define MTL_TRDPRE_MASK REG_GENMASK(7, 0) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index ae984c66c48a..6fc0d1b89690 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -241,8 +241,6 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, const char *name, struct lock_class_key *key) { - BUG_ON(!fn); - __init_waitqueue_head(&fence->wait, name, key); fence->fn = fn; #ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index a7c603bc1b01..619fc5a22f0c 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -48,11 +48,15 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, do { \ static struct lock_class_key __key; \ \ + BUILD_BUG_ON((fn) == NULL); \ __i915_sw_fence_init((fence), (fn), #fence, &__key); \ } while (0) #else #define i915_sw_fence_init(fence, fn) \ - __i915_sw_fence_init((fence), (fn), NULL, NULL) +do { \ + BUILD_BUG_ON((fn) == NULL); \ + __i915_sw_fence_init((fence), (fn), NULL, NULL); \ +} while (0) #endif void i915_sw_fence_reinit(struct i915_sw_fence *fence); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index c10d68cdc3ca..6c14d13364bf 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -360,10 +360,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) -#define KBps(x) (1000 * (x)) -#define MBps(x) KBps(1000 * (x)) -#define GBps(x) ((u64)1000 * MBps((x))) - void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint); static inline void __add_taint_for_CI(unsigned int taint) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d98fbbd589aa..20575eb77ea7 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -88,46 +88,57 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -void intel_device_info_print_static(const struct intel_device_info *info, - struct drm_printer *p) +void intel_device_info_print(const struct intel_device_info *info, + const struct intel_runtime_info *runtime, + struct drm_printer *p) { - if (info->graphics.rel) - drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver, - info->graphics.rel); + if (runtime->graphics.ip.rel) + drm_printf(p, "graphics version: %u.%02u\n", + runtime->graphics.ip.ver, + runtime->graphics.ip.rel); else - drm_printf(p, "graphics version: %u\n", info->graphics.ver); + drm_printf(p, "graphics version: %u\n", + runtime->graphics.ip.ver); - 
if (info->media.rel) - drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel); + if (runtime->media.ip.rel) + drm_printf(p, "media version: %u.%02u\n", + runtime->media.ip.ver, + runtime->media.ip.rel); else - drm_printf(p, "media version: %u\n", info->media.ver); + drm_printf(p, "media version: %u\n", + runtime->media.ip.ver); - if (info->display.rel) - drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel); + if (runtime->display.ip.rel) + drm_printf(p, "display version: %u.%02u\n", + runtime->display.ip.ver, + runtime->display.ip.rel); else - drm_printf(p, "display version: %u\n", info->display.ver); + drm_printf(p, "display version: %u\n", + runtime->display.ip.ver); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "memory-regions: %x\n", info->memory_regions); - drm_printf(p, "page-sizes: %x\n", info->page_sizes); + drm_printf(p, "memory-regions: %x\n", runtime->memory_regions); + drm_printf(p, "page-sizes: %x\n", runtime->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); - drm_printf(p, "ppgtt-size: %d\n", info->ppgtt_size); - drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); + drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); + drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type); drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size); #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->name)) DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG + drm_printf(p, "has_pooled_eu: %s\n", str_yes_no(runtime->has_pooled_eu)); + #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, str_yes_no(info->display.name)) DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG -} -void intel_device_info_print_runtime(const struct intel_runtime_info *info, - struct drm_printer *p) -{ - drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); + drm_printf(p, "has_hdcp: %s\n", str_yes_no(runtime->has_hdcp)); + drm_printf(p, "has_dmc: %s\n", str_yes_no(runtime->has_dmc)); + drm_printf(p, "has_dsc: %s\n", str_yes_no(runtime->has_dsc)); + + drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq); } #undef INTEL_VGA_DEVICE @@ -364,55 +375,55 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - info->display.pipe_mask = 0; - info->display.cpu_transcoder_mask = 0; - info->display.fbc_mask = 0; + runtime->pipe_mask = 0; + runtime->cpu_transcoder_mask = 0; + runtime->fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - info->display.pipe_mask &= ~BIT(PIPE_C); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + runtime->pipe_mask &= ~BIT(PIPE_C); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_A); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); - info->display.fbc_mask &= ~BIT(INTEL_FBC_A); + runtime->pipe_mask &= ~BIT(PIPE_A); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + runtime->fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_B); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + runtime->pipe_mask &= ~BIT(PIPE_B); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & 
SKL_DFSM_PIPE_C_DISABLE) { - info->display.pipe_mask &= ~BIT(PIPE_C); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + runtime->pipe_mask &= ~BIT(PIPE_C); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - info->display.pipe_mask &= ~BIT(PIPE_D); - info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + runtime->pipe_mask &= ~BIT(PIPE_D); + runtime->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) - info->display.has_hdcp = 0; + runtime->has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - info->display.fbc_mask = 0; + runtime->fbc_mask = 0; if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) - info->display.has_dmc = 0; + runtime->has_dmc = 0; if (DISPLAY_VER(dev_priv) >= 10 && (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE)) - info->display.has_dsc = 0; + runtime->has_dsc = 0; } if (GRAPHICS_VER(dev_priv) == 6 && i915_vtd_active(dev_priv)) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); - info->ppgtt_type = INTEL_PPGTT_NONE; + runtime->ppgtt_type = INTEL_PPGTT_NONE; } runtime->rawclk_freq = intel_read_rawclk(dev_priv); @@ -422,8 +433,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) dev_priv->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); memset(&info->display, 0, sizeof(info->display)); + + runtime->cpu_transcoder_mask = 0; memset(runtime->num_sprites, 0, sizeof(runtime->num_sprites)); memset(runtime->num_scalers, 0, sizeof(runtime->num_scalers)); + runtime->fbc_mask = 0; + runtime->has_hdcp = false; + runtime->has_dmc = false; + runtime->has_dsc = false; } } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index e681bc6ed8e9..d638235e1d26 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -37,6 +37,7 @@ struct drm_printer; struct drm_i915_private; +struct intel_gt_definition; /* Keep in gen based order, and chronological order within a gen */ enum intel_platform { @@ -164,7 +165,6 @@ enum intel_ppgtt_type { func(has_media_ratio_mode); \ func(has_mslice_steering); \ func(has_one_eu_per_fuse_bit); \ - func(has_pooled_eu); \ func(has_pxp); \ func(has_rc6); \ func(has_rc6p); \ @@ -180,14 +180,11 @@ enum intel_ppgtt_type { /* Keep in alphabetical order */ \ func(cursor_needs_physical); \ func(has_cdclk_crawl); \ - func(has_dmc); \ func(has_ddi); \ func(has_dp_mst); \ func(has_dsb); \ - func(has_dsc); \ func(has_fpga_dbg); \ func(has_gmch); \ - func(has_hdcp); \ func(has_hotplug); \ func(has_hti); \ func(has_ipc); \ @@ -203,23 +200,67 @@ struct ip_version { u8 rel; }; -struct intel_device_info { - struct ip_version graphics; - struct ip_version media; +struct intel_runtime_info { + struct { + struct ip_version ip; + } graphics; + struct { + struct ip_version ip; + } media; + struct { + struct ip_version ip; + } display; + + /* + * Platform mask is used for optimizing or-ed IS_PLATFORM calls into + * single runtime conditionals, and also to provide groundwork for + * future per platform, or per SKU build optimizations. + * + * Array can be extended when necessary if the corresponding + * BUILD_BUG_ON is hit. 
+ */ + u32 platform_mask[2]; + + u16 device_id; intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ - enum intel_platform platform; + u32 rawclk_freq; - unsigned int dma_mask_size; /* available DMA address bits */ + struct intel_step_info step; + + unsigned int page_sizes; /* page sizes supported by the HW */ enum intel_ppgtt_type ppgtt_type; unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */ - unsigned int page_sizes; /* page sizes supported by the HW */ - u32 memory_regions; /* regions supported by the HW */ + bool has_pooled_eu; + + /* display */ + struct { + u8 pipe_mask; + u8 cpu_transcoder_mask; + + u8 num_sprites[I915_MAX_PIPES]; + u8 num_scalers[I915_MAX_PIPES]; + + u8 fbc_mask; + + bool has_hdcp; + bool has_dmc; + bool has_dsc; + }; +}; + +struct intel_device_info { + enum intel_platform platform; + + unsigned int dma_mask_size; /* available DMA address bits */ + + const struct intel_gt_definition *extra_gt_list; + + u8 gt; /* GT number, 0 if undefined */ #define DEFINE_FLAG(name) u8 name:1 @@ -227,12 +268,6 @@ struct intel_device_info { #undef DEFINE_FLAG struct { - u8 ver; - u8 rel; - - u8 pipe_mask; - u8 cpu_transcoder_mask; - u8 fbc_mask; u8 abox_mask; struct { @@ -259,27 +294,11 @@ struct intel_device_info { u32 gamma_lut_tests; } color; } display; -}; -struct intel_runtime_info { /* - * Platform mask is used for optimizing or-ed IS_PLATFORM calls into - * into single runtime conditionals, and also to provide groundwork - * for future per platform, or per SKU build optimizations. - * - * Array can be extended when necessary if the corresponding - * BUILD_BUG_ON is hit. + * Initial runtime info. Do not access outside of i915_driver_create(). */ - u32 platform_mask[2]; - - u16 device_id; - - u8 num_sprites[I915_MAX_PIPES]; - u8 num_scalers[I915_MAX_PIPES]; - - u32 rawclk_freq; - - struct intel_step_info step; + const struct intel_runtime_info __runtime; }; struct intel_driver_caps { @@ -292,10 +311,9 @@ const char *intel_platform_name(enum intel_platform platform); void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv); void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); -void intel_device_info_print_static(const struct intel_device_info *info, - struct drm_printer *p); -void intel_device_info_print_runtime(const struct intel_runtime_info *info, - struct drm_printer *p); +void intel_device_info_print(const struct intel_device_info *info, + const struct intel_runtime_info *runtime, + struct drm_printer *p); void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 437447119770..2403ccd52c74 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -466,6 +466,43 @@ static int gen12_get_dram_info(struct drm_i915_private *i915) return icl_pcode_read_mem_global_info(i915); } +static int xelpdp_get_dram_info(struct drm_i915_private *i915) +{ + u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL); + struct dram_info *dram_info = &i915->dram_info; + + /* keep val intact: the channel and QGV point counts are decoded from it below */ + switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) { + case 0: + dram_info->type = INTEL_DRAM_DDR4; + break; + case 1: + dram_info->type = INTEL_DRAM_DDR5; + break; + case 2: + dram_info->type = INTEL_DRAM_LPDDR5; + break; + case 3: + dram_info->type = INTEL_DRAM_LPDDR4; + break; + case 4: + dram_info->type = INTEL_DRAM_DDR3; + break; + case 5: + dram_info->type = INTEL_DRAM_LPDDR3; + break; + 
default: + MISSING_CASE(val); + return -EINVAL; + } + + dram_info->num_channels = REG_FIELD_GET(MTL_N_OF_POPULATED_CH_MASK, val); + dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val); + /* PSF GV points not supported in D14+ */ + + return 0; +} + void intel_dram_detect(struct drm_i915_private *i915) { struct dram_info *dram_info = &i915->dram_info; @@ -480,7 +517,9 @@ void intel_dram_detect(struct drm_i915_private *i915) */ dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915); - if (GRAPHICS_VER(i915) >= 12) + if (DISPLAY_VER(i915) >= 14) + ret = xelpdp_get_dram_info(i915); + else if (GRAPHICS_VER(i915) >= 12) ret = gen12_get_dram_info(i915); else if (GRAPHICS_VER(i915) >= 11) ret = gen11_get_dram_info(i915); diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 157e166672d7..e015bc91a26f 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -4,6 +4,7 @@ */ #include "display/intel_audio_regs.h" +#include "display/intel_backlight_regs.h" #include "display/intel_dmc_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 0fec25be146a..ba9843cb1b13 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -138,6 +138,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); return PCH_ADP; + case INTEL_PCH_MTP_DEVICE_ID_TYPE: + case INTEL_PCH_MTP2_DEVICE_ID_TYPE: + drm_dbg_kms(&dev_priv->drm, "Found Meteor Lake PCH\n"); + drm_WARN_ON(&dev_priv->drm, !IS_METEORLAKE(dev_priv)); + return PCH_MTP; default: return PCH_NONE; } @@ -166,7 +171,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv, * make an educated guess as to which PCH is really there. 
*/ - if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) + if (IS_METEORLAKE(dev_priv)) + id = INTEL_PCH_MTP_DEVICE_ID_TYPE; + else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) id = INTEL_PCH_ADP_DEVICE_ID_TYPE; else if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) id = INTEL_PCH_TGP_DEVICE_ID_TYPE; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c8ce9781d1a..32aff5a70d04 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -25,6 +25,7 @@ enum intel_pch { PCH_ICP, /* Ice Lake/Jasper Lake PCH */ PCH_TGP, /* Tiger Lake/Mule Creek Canyon PCH */ PCH_ADP, /* Alder Lake PCH */ + PCH_MTP, /* Meteor Lake PCH */ /* Fake PCHs, functionality handled on the same PCI dev */ PCH_DG1 = 1024, @@ -57,12 +58,15 @@ enum intel_pch { #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 #define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_ADP4_DEVICE_ID_TYPE 0x5480 +#define INTEL_PCH_MTP_DEVICE_ID_TYPE 0x7E00 +#define INTEL_PCH_MTP2_DEVICE_ID_TYPE 0xAE00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) +#define HAS_PCH_MTP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MTP) #define HAS_PCH_DG2(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG2) #define HAS_PCH_ADP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ADP) #define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1) diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h index 12cd9d4f23de..4977a524ce6f 100644 --- a/drivers/gpu/drm/i915/intel_pci_config.h +++ b/drivers/gpu/drm/i915/intel_pci_config.h @@ -6,6 +6,13 @@ #ifndef __INTEL_PCI_CONFIG_H__ #define __INTEL_PCI_CONFIG_H__ +/* PCI BARs */ +#define GTTMMADR_BAR 0 +#define GEN2_GTTMMADR_BAR 1 +#define GFXMEM_BAR 2 +#define GTT_APERTURE_BAR GFXMEM_BAR +#define GEN12_LMEM_BAR GFXMEM_BAR + /* BSM in include/drm/i915_drm.h */ #define MCHBAR_I915 0x44 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ef7553b494ea..8f86f56e7ca4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -25,60 +25,22 @@ * */ -#include <linux/module.h> -#include <linux/string_helpers.h> -#include <linux/pm_runtime.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_blend.h> -#include <drm/drm_fourcc.h> - -#include "display/intel_atomic.h" -#include "display/intel_atomic_plane.h" -#include "display/intel_bw.h" #include "display/intel_de.h" #include "display/intel_display_trace.h" -#include "display/intel_display_types.h" -#include "display/intel_fb.h" -#include "display/intel_fbc.h" -#include "display/intel_sprite.h" -#include "display/skl_universal_plane.h" +#include "display/skl_watermark.h" #include "gt/intel_engine_regs.h" #include "gt/intel_gt_regs.h" -#include "gt/intel_llc.h" #include "i915_drv.h" -#include "i915_fixed.h" -#include "i915_irq.h" #include "intel_mchbar_regs.h" -#include "intel_pcode.h" #include "intel_pm.h" #include "vlv_sideband.h" -#include "../../../platform/x86/intel_ips.h" - -static void skl_sagv_disable(struct drm_i915_private *dev_priv); struct drm_i915_clock_gating_funcs { void (*init_clock_gating)(struct drm_i915_private *i915); }; -/* Stores plane specific WM parameters */ -struct skl_wm_params { - bool x_tiled, y_tiled; - bool rc_surface; - bool is_planar; - u32 width; - u8 cpp; 
- u32 plane_pixel_rate; - u32 y_min_scanlines; - u32 plane_bytes_per_line; - uint_fixed_16_16_t plane_blocks_per_line; - uint_fixed_16_16_t y_tile_minimum; - u32 linetime_us; - u32 dbuf_block_size; -}; - /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; @@ -468,13 +430,13 @@ bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { bool ret; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); ret = _intel_set_memory_cxsr(dev_priv, enable); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->wm.vlv.cxsr = enable; + dev_priv->display.wm.vlv.cxsr = enable; else if (IS_G4X(dev_priv)) - dev_priv->wm.g4x.cxsr = enable; - mutex_unlock(&dev_priv->wm.wm_mutex); + dev_priv->display.wm.g4x.cxsr = enable; + mutex_unlock(&dev_priv->display.wm.wm_mutex); return ret; } @@ -834,11 +796,11 @@ static bool is_enabling(int old, int new, int threshold) static int intel_wm_num_levels(struct drm_i915_private *dev_priv) { - return dev_priv->wm.max_level + 1; + return dev_priv->display.wm.max_level + 1; } -static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); @@ -1093,11 +1055,11 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ - dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; - dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12; - dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; - dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL; + dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL; } static int g4x_plane_fifo_size(enum plane_id plane_id, int level) @@ -1150,7 +1112,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - unsigned int latency = dev_priv->wm.pri_latency[level] * 10; + unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) @@ -1324,7 +1286,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (level > dev_priv->wm.max_level) + if (level > dev_priv->display.wm.max_level) return false; return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && @@ -1583,7 +1545,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, static void g4x_program_watermarks(struct drm_i915_private *dev_priv) { - struct g4x_wm_values *old_wm = &dev_priv->wm.g4x; + struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; struct g4x_wm_values new_wm = {}; g4x_merge_wm(dev_priv, &new_wm); @@ -1609,10 +1571,10 @@ static void g4x_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; 
g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void g4x_optimize_watermarks(struct intel_atomic_state *state, @@ -1625,10 +1587,10 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } /* latency must be in 0.1us units. */ @@ -1650,15 +1612,15 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate, static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) { /* all latencies in usec */ - dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; - dev_priv->wm.max_level = VLV_WM_LEVEL_PM2; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; - dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; - dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS; } } @@ -1672,7 +1634,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, &crtc_state->hw.pipe_mode; unsigned int pixel_rate, htotal, cpp, width, wm; - if (dev_priv->wm.pri_latency[level] == 0) + if (dev_priv->display.wm.pri_latency[level] == 0) return USHRT_MAX; if (!intel_wm_plane_visible(crtc_state, plane_state)) @@ -1693,7 +1655,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, wm = 63; } else { wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, - dev_priv->wm.pri_latency[level] * 10); + dev_priv->display.wm.pri_latency[level] * 10); } return min_t(unsigned int, wm, USHRT_MAX); @@ -2158,7 +2120,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int num_active_pipes = 0; - wm->level = dev_priv->wm.max_level; + wm->level = dev_priv->display.wm.max_level; wm->cxsr = true; for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -2197,7 +2159,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, static void vlv_program_watermarks(struct drm_i915_private *dev_priv) { - struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; + struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; struct vlv_wm_values new_wm = {}; vlv_merge_wm(dev_priv, &new_wm); @@ -2235,10 +2197,10 @@ static void vlv_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void vlv_optimize_watermarks(struct intel_atomic_state *state, @@ -2251,10 +2213,10 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + 
mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void i965_update_wm(struct drm_i915_private *dev_priv) @@ -2835,9 +2797,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_plane_state *curstate, struct intel_wm_level *result) { - u16 pri_latency = dev_priv->wm.pri_latency[level]; - u16 spr_latency = dev_priv->wm.spr_latency[level]; - u16 cur_latency = dev_priv->wm.cur_latency[level]; + u16 pri_latency = dev_priv->display.wm.pri_latency[level]; + u16 spr_latency = dev_priv->display.wm.spr_latency[level]; + u16 cur_latency = dev_priv->display.wm.cur_latency[level]; /* WM1+ latency values stored in 0.5us units */ if (level > 0) { @@ -2861,119 +2823,43 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, result->enable = true; } -static void intel_read_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[]) +static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - struct intel_uncore *uncore = &dev_priv->uncore; - - if (DISPLAY_VER(dev_priv) >= 9) { - u32 val; - int ret, i; - int level, max_level = ilk_wm_max_level(dev_priv); - int mult = IS_DG2(dev_priv) ? 2 : 1; - - /* read the first set of memory latencies[0:3] */ - val = 0; /* data0 to be programmed to 0 for first set */ - ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + u64 sskpd; - if (ret) { - drm_err(&dev_priv->drm, - "SKL Mailbox read error = %d\n", ret); - return; - } - - wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - - /* read the second set of memory latencies[4:7] */ - val = 1; /* data0 to be programmed to 1 for second set */ - ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); - if (ret) { - drm_err(&dev_priv->drm, - "SKL Mailbox read error = %d\n", ret); - return; - } + sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); - wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; - wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); + if (wm[0] == 0) + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); +} - /* - * If a level n (n > 1) has a 0us latency, all levels m (m >= n) - * need to be disabled. We make sure to sanitize the values out - * of the punit to satisfy this requirement. 
- */ - for (level = 1; level <= max_level; level++) { - if (wm[level] == 0) { - for (i = level + 1; i <= max_level; i++) - wm[i] = 0; +static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 sskpd; - max_level = level - 1; + sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); - break; - } - } + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); +} - /* - * WaWmMemoryReadLatency - * - * punit doesn't take into account the read latency so we need - * to add proper adjustement to each valid level we retrieve - * from the punit when level 0 response data is 0us. - */ - if (wm[0] == 0) { - u8 adjust = DISPLAY_VER(dev_priv) >= 12 ? 3 : 2; +static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 mltr; - for (level = 0; level <= max_level; level++) - wm[level] += adjust; - } + mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); - /* - * WA Level-0 adjustment for 16GB DIMMs: SKL+ - * If we could not get dimm info enable this WA to prevent from - * any underrun. If not able to get Dimm info assume 16GB dimm - * to avoid any underrun. - */ - if (dev_priv->dram_info.wm_lv_0_adjust_needed) - wm[0] += 1; - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); - if (wm[0] == 0) - wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); - wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); - wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); - wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); - wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); - } else if (DISPLAY_VER(dev_priv) >= 6) { - u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); - wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); - wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); - wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); - } else if (DISPLAY_VER(dev_priv) >= 5) { - u32 mltr = intel_uncore_read(uncore, MLTR_ILK); - - /* ILK primary LP0 latency is 700 ns */ - wm[0] = 7; - wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); - wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); - } else { - MISSING_CASE(INTEL_DEVID(dev_priv)); - } + /* ILK primary LP0 latency is 700 ns */ + wm[0] = 7; + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); + wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); } static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, @@ -3007,9 +2893,8 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv) return 2; } -static void intel_print_wm_latency(struct drm_i915_private *dev_priv, - const char *name, - const u16 wm[]) +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]) { int level, max_level = ilk_wm_max_level(dev_priv); @@ -3061,18 +2946,18 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. 
*/ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); if (!changed) return; drm_dbg_kms(&dev_priv->drm, "WM latency values increased to avoid potential underruns\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) @@ -3088,37 +2973,42 @@ static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) * interrupts only. To play it safe we disable LP3 * watermarks entirely. */ - if (dev_priv->wm.pri_latency[3] == 0 && - dev_priv->wm.spr_latency[3] == 0 && - dev_priv->wm.cur_latency[3] == 0) + if (dev_priv->display.wm.pri_latency[3] == 0 && + dev_priv->display.wm.spr_latency[3] == 0 && + dev_priv->display.wm.cur_latency[3] == 0) return; - dev_priv->wm.pri_latency[3] = 0; - dev_priv->wm.spr_latency[3] = 0; - dev_priv->wm.cur_latency[3] = 0; + dev_priv->display.wm.pri_latency[3] = 0; + dev_priv->display.wm.spr_latency[3] = 0; + dev_priv->display.wm.cur_latency[3] = 0; drm_dbg_kms(&dev_priv->drm, "LP3 watermarks disabled due to potential for lost interrupts\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); } static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) { - intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else if (DISPLAY_VER(dev_priv) >= 6) + snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else + ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, - sizeof(dev_priv->wm.pri_latency)); - memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, - sizeof(dev_priv->wm.pri_latency)); + memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); - intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); - intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); + intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); + intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); - intel_print_wm_latency(dev_priv, "Primary", 
dev_priv->wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); if (DISPLAY_VER(dev_priv) == 6) { snb_wm_latency_quirk(dev_priv); @@ -3126,12 +3016,6 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) } } -static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); - intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); -} - static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, struct intel_pipe_wm *pipe_wm) { @@ -3386,7 +3270,7 @@ static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) return 2 * level; else - return dev_priv->wm.pri_latency[level]; + return dev_priv->display.wm.pri_latency[level]; } static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, @@ -3538,7 +3422,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, unsigned int dirty) { - struct ilk_wm_values *previous = &dev_priv->wm.hw; + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; bool changed = false; if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { @@ -3572,7 +3456,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, static void ilk_write_wm_values(struct drm_i915_private *dev_priv, struct ilk_wm_values *results) { - struct ilk_wm_values *previous = &dev_priv->wm.hw; + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; unsigned int dirty; u32 val; @@ -3634,7 +3518,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); - dev_priv->wm.hw = *results; + dev_priv->display.wm.hw = *results; } bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) @@ -3642,2765 +3526,6 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } -u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv) -{ - u8 enabled_slices = 0; - enum dbuf_slice slice; - - for_each_dbuf_slice(dev_priv, slice) { - if (intel_uncore_read(&dev_priv->uncore, - DBUF_CTL_S(slice)) & DBUF_POWER_STATE) - enabled_slices |= BIT(slice); - } - - return enabled_slices; -} - -/* - * FIXME: We still don't have the proper code detect if we need to apply the WA, - * so assume we'll always need it in order to avoid underruns. 
- */ -static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) -{ - return DISPLAY_VER(dev_priv) == 9; -} - -static bool -intel_has_sagv(struct drm_i915_private *dev_priv) -{ - return DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv) && - dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; -} - -static u32 -intel_sagv_block_time(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 12) { - u32 val = 0; - int ret; - - ret = snb_pcode_read(&dev_priv->uncore, - GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, - &val, NULL); - if (ret) { - drm_dbg_kms(&dev_priv->drm, "Couldn't read SAGV block time!\n"); - return 0; - } - - return val; - } else if (DISPLAY_VER(dev_priv) == 11) { - return 10; - } else if (DISPLAY_VER(dev_priv) == 9 && !IS_LP(dev_priv)) { - return 30; - } else { - return 0; - } -} - -static void intel_sagv_init(struct drm_i915_private *i915) -{ - if (!intel_has_sagv(i915)) - i915->sagv_status = I915_SAGV_NOT_CONTROLLED; - - /* - * Probe to see if we have working SAGV control. - * For icl+ this was already determined by intel_bw_init_hw(). - */ - if (DISPLAY_VER(i915) < 11) - skl_sagv_disable(i915); - - drm_WARN_ON(&i915->drm, i915->sagv_status == I915_SAGV_UNKNOWN); - - i915->sagv_block_time_us = intel_sagv_block_time(i915); - - drm_dbg_kms(&i915->drm, "SAGV supported: %s, original SAGV block time: %u us\n", - str_yes_no(intel_has_sagv(i915)), i915->sagv_block_time_us); - - /* avoid overflow when adding with wm0 latency/etc. */ - if (drm_WARN(&i915->drm, i915->sagv_block_time_us > U16_MAX, - "Excessive SAGV block time %u, ignoring\n", - i915->sagv_block_time_us)) - i915->sagv_block_time_us = 0; - - if (!intel_has_sagv(i915)) - i915->sagv_block_time_us = 0; -} - -/* - * SAGV dynamically adjusts the system agent voltage and clock frequencies - * depending on power and performance requirements. The display engine access - * to system memory is blocked during the adjustment time. Because of the - * blocking time, having this enabled can cause full system hangs and/or pipe - * underruns if we don't meet all of the following requirements: - * - * - <= 1 pipe enabled - * - All planes can enable watermarks for latencies >= SAGV engine block time - * - We're not using an interlaced display configuration - */ -static void skl_sagv_enable(struct drm_i915_private *dev_priv) -{ - int ret; - - if (!intel_has_sagv(dev_priv)) - return; - - if (dev_priv->sagv_status == I915_SAGV_ENABLED) - return; - - drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); - ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_ENABLE); - - /* We don't need to wait for SAGV when enabling */ - - /* - * Some skl systems, pre-release machines in particular, - * don't actually have SAGV. 
- */ - if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; - return; - } else if (ret < 0) { - drm_err(&dev_priv->drm, "Failed to enable SAGV\n"); - return; - } - - dev_priv->sagv_status = I915_SAGV_ENABLED; -} - -static void skl_sagv_disable(struct drm_i915_private *dev_priv) -{ - int ret; - - if (!intel_has_sagv(dev_priv)) - return; - - if (dev_priv->sagv_status == I915_SAGV_DISABLED) - return; - - drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n"); - /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_DISABLE, - GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, - 1); - /* - * Some skl systems, pre-release machines in particular, - * don't actually have SAGV. - */ - if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { - drm_dbg(&dev_priv->drm, "No SAGV found on system, ignoring\n"); - dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; - return; - } else if (ret < 0) { - drm_err(&dev_priv->drm, "Failed to disable SAGV (%d)\n", ret); - return; - } - - dev_priv->sagv_status = I915_SAGV_DISABLED; -} - -static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - - if (!new_bw_state) - return; - - if (!intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_disable(i915); -} - -static void skl_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - - if (!new_bw_state) - return; - - if (intel_can_enable_sagv(i915, new_bw_state)) - skl_sagv_enable(i915); -} - -static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask; - new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Restrict required qgv points before updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. 
- */ - icl_pcode_restrict_qgv_points(dev_priv, new_mask); -} - -static void icl_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - new_mask = new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Allow required qgv points after updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. - */ - icl_pcode_restrict_qgv_points(dev_priv, new_mask); -} - -void intel_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - /* - * Just return if we can't control SAGV or don't have it. - * This is different from situation when we have SAGV but just can't - * afford it due to DBuf limitation - in case if SAGV is completely - * disabled in a BIOS, we are not even allowed to send a PCode request, - * as it will throw an error. So have to check it here. - */ - if (!intel_has_sagv(i915)) - return; - - if (DISPLAY_VER(i915) >= 11) - icl_sagv_pre_plane_update(state); - else - skl_sagv_pre_plane_update(state); -} - -void intel_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - - /* - * Just return if we can't control SAGV or don't have it. - * This is different from situation when we have SAGV but just can't - * afford it due to DBuf limitation - in case if SAGV is completely - * disabled in a BIOS, we are not even allowed to send a PCode request, - * as it will throw an error. So have to check it here. - */ - if (!intel_has_sagv(i915)) - return; - - if (DISPLAY_VER(i915) >= 11) - icl_sagv_post_plane_update(state); - else - skl_sagv_post_plane_update(state); -} - -static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum plane_id plane_id; - int max_level = INT_MAX; - - if (!intel_has_sagv(dev_priv)) - return false; - - if (!crtc_state->hw.active) - return true; - - if (crtc_state->hw.pipe_mode.flags & DRM_MODE_FLAG_INTERLACE) - return false; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - int level; - - /* Skip this plane if it's not enabled */ - if (!wm->wm[0].enable) - continue; - - /* Find the highest enabled wm level for this plane */ - for (level = ilk_wm_max_level(dev_priv); - !wm->wm[level].enable; --level) - { } - - /* Highest common enabled wm level for all planes */ - max_level = min(level, max_level); - } - - /* No enabled planes? 
*/ - if (max_level == INT_MAX) - return true; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - /* - * All enabled planes must have enabled a common wm level that - * can tolerate memory latencies higher than sagv_block_time_us - */ - if (wm->wm[0].enable && !wm->wm[max_level].can_sagv) - return false; - } - - return true; -} - -static bool tgl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum plane_id plane_id; - - if (!crtc_state->hw.active) - return true; - - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (wm->wm[0].enable && !wm->sagv.wm0.enable) - return false; - } - - return true; -} - -static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (DISPLAY_VER(dev_priv) >= 12) - return tgl_crtc_can_enable_sagv(crtc_state); - else - return skl_crtc_can_enable_sagv(crtc_state); -} - -bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, - const struct intel_bw_state *bw_state) -{ - if (DISPLAY_VER(dev_priv) < 11 && - bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) - return false; - - return bw_state->pipe_sagv_reject == 0; -} - -static int intel_compute_sagv_mask(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int ret; - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - struct intel_bw_state *new_bw_state = NULL; - const struct intel_bw_state *old_bw_state = NULL; - int i; - - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - new_bw_state = intel_atomic_get_bw_state(state); - if (IS_ERR(new_bw_state)) - return PTR_ERR(new_bw_state); - - old_bw_state = intel_atomic_get_old_bw_state(state); - - if (intel_crtc_can_enable_sagv(new_crtc_state)) - new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe); - else - new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe); - } - - if (!new_bw_state) - return 0; - - new_bw_state->active_pipes = - intel_calc_active_pipes(state, old_bw_state->active_pipes); - - if (new_bw_state->active_pipes != old_bw_state->active_pipes) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - - for_each_new_intel_crtc_in_state(state, crtc, - new_crtc_state, i) { - struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; - - /* - * We store use_sagv_wm in the crtc state rather than relying on - * that bw state since we have no convenient way to get at the - * latter from the plane commit hooks (especially in the legacy - * cursor case) - */ - pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(dev_priv) && - DISPLAY_VER(dev_priv) >= 12 && - intel_can_enable_sagv(dev_priv, new_bw_state); - } - - return 0; -} - -static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, - u16 start, u16 end) -{ - entry->start = start; - entry->end = end; - - return end; 
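
For the global decision, two things must hold per intel_can_enable_sagv() above: on pre-ICL hardware the active pipe mask must be empty or a power of two (i.e. at most one active pipe), and no pipe may have set its bit in pipe_sagv_reject. The same logic as a standalone sketch:

	#include <stdbool.h>
	#include <stdint.h>

	static bool demo_is_power_of_2(uint8_t v)
	{
		return v && !(v & (v - 1));
	}

	/*
	 * Pre-ICL hardware tolerates SAGV only with at most one active
	 * pipe; any pipe can veto SAGV via its bit in pipe_sagv_reject.
	 */
	static bool demo_can_enable_sagv(int display_ver, uint8_t active_pipes,
					 uint8_t pipe_sagv_reject)
	{
		if (display_ver < 11 &&
		    active_pipes && !demo_is_power_of_2(active_pipes))
			return false;

		return pipe_sagv_reject == 0;
	}
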
-} - -static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv) -{ - return INTEL_INFO(dev_priv)->display.dbuf.size / - hweight8(INTEL_INFO(dev_priv)->display.dbuf.slice_mask); -} - -static void -skl_ddb_entry_for_slices(struct drm_i915_private *dev_priv, u8 slice_mask, - struct skl_ddb_entry *ddb) -{ - int slice_size = intel_dbuf_slice_size(dev_priv); - - if (!slice_mask) { - ddb->start = 0; - ddb->end = 0; - return; - } - - ddb->start = (ffs(slice_mask) - 1) * slice_size; - ddb->end = fls(slice_mask) * slice_size; - - WARN_ON(ddb->start >= ddb->end); - WARN_ON(ddb->end > INTEL_INFO(dev_priv)->display.dbuf.size); -} - -static unsigned int mbus_ddb_offset(struct drm_i915_private *i915, u8 slice_mask) -{ - struct skl_ddb_entry ddb; - - if (slice_mask & (BIT(DBUF_S1) | BIT(DBUF_S2))) - slice_mask = BIT(DBUF_S1); - else if (slice_mask & (BIT(DBUF_S3) | BIT(DBUF_S4))) - slice_mask = BIT(DBUF_S3); - - skl_ddb_entry_for_slices(i915, slice_mask, &ddb); - - return ddb.start; -} - -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry *entry) -{ - int slice_size = intel_dbuf_slice_size(dev_priv); - enum dbuf_slice start_slice, end_slice; - u8 slice_mask = 0; - - if (!skl_ddb_entry_size(entry)) - return 0; - - start_slice = entry->start / slice_size; - end_slice = (entry->end - 1) / slice_size; - - /* - * Per plane DDB entry can in a really worst case be on multiple slices - * but single entry is anyway contigious. - */ - while (start_slice <= end_slice) { - slice_mask |= BIT(start_slice); - start_slice++; - } - - return slice_mask; -} - -static unsigned int intel_crtc_ddb_weight(const struct intel_crtc_state *crtc_state) -{ - const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - int hdisplay, vdisplay; - - if (!crtc_state->hw.active) - return 0; - - /* - * Watermark/ddb requirement highly depends upon width of the - * framebuffer, So instead of allocating DDB equally among pipes - * distribute DDB based on resolution/width of the display. - */ - drm_mode_get_hv_timing(pipe_mode, &hdisplay, &vdisplay); - - return hdisplay; -} - -static void intel_crtc_dbuf_weights(const struct intel_dbuf_state *dbuf_state, - enum pipe for_pipe, - unsigned int *weight_start, - unsigned int *weight_end, - unsigned int *weight_total) -{ - struct drm_i915_private *dev_priv = - to_i915(dbuf_state->base.state->base.dev); - enum pipe pipe; - - *weight_start = 0; - *weight_end = 0; - *weight_total = 0; - - for_each_pipe(dev_priv, pipe) { - int weight = dbuf_state->weight[pipe]; - - /* - * Do not account pipes using other slice sets - * luckily as of current BSpec slice sets do not partially - * intersect(pipes share either same one slice or same slice set - * i.e no partial intersection), so it is enough to check for - * equality for now. 
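
The slice-mask-to-DDB-entry mapping above is plain bit arithmetic: the run of set slice bits is contiguous, so (ffs - 1) and fls scaled by the slice size give the block range. For example, assuming 1024-block slices (the real size is display.dbuf.size divided by the number of slices), mask 0b0110 maps to blocks [1024, 3072). A portable sketch with hand-rolled ffs/fls:

	#include <stdint.h>

	static int demo_ffs(unsigned int x)	/* 1-based index of lowest set bit */
	{
		int i = 1;

		if (!x)
			return 0;
		while (!(x & 1)) {
			x >>= 1;
			i++;
		}
		return i;
	}

	static int demo_fls(unsigned int x)	/* 1-based index of highest set bit */
	{
		int i = 0;

		while (x) {
			x >>= 1;
			i++;
		}
		return i;
	}

	/* Same arithmetic as skl_ddb_entry_for_slices(): [start, end) in blocks. */
	static void demo_ddb_for_slices(uint8_t slice_mask, int slice_size,
					uint16_t *start, uint16_t *end)
	{
		if (!slice_mask) {
			*start = *end = 0;
			return;
		}
		*start = (demo_ffs(slice_mask) - 1) * slice_size;
		*end = demo_fls(slice_mask) * slice_size;
	}
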
- */ - if (dbuf_state->slices[pipe] != dbuf_state->slices[for_pipe]) - continue; - - *weight_total += weight; - if (pipe < for_pipe) { - *weight_start += weight; - *weight_end += weight; - } else if (pipe == for_pipe) { - *weight_end += weight; - } - } -} - -static int -skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - unsigned int weight_total, weight_start, weight_end; - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - struct intel_crtc_state *crtc_state; - struct skl_ddb_entry ddb_slices; - enum pipe pipe = crtc->pipe; - unsigned int mbus_offset = 0; - u32 ddb_range_size; - u32 dbuf_slice_mask; - u32 start, end; - int ret; - - if (new_dbuf_state->weight[pipe] == 0) { - skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0); - goto out; - } - - dbuf_slice_mask = new_dbuf_state->slices[pipe]; - - skl_ddb_entry_for_slices(dev_priv, dbuf_slice_mask, &ddb_slices); - mbus_offset = mbus_ddb_offset(dev_priv, dbuf_slice_mask); - ddb_range_size = skl_ddb_entry_size(&ddb_slices); - - intel_crtc_dbuf_weights(new_dbuf_state, pipe, - &weight_start, &weight_end, &weight_total); - - start = ddb_range_size * weight_start / weight_total; - end = ddb_range_size * weight_end / weight_total; - - skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], - ddb_slices.start - mbus_offset + start, - ddb_slices.start - mbus_offset + end); - -out: - if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] && - skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe], - &new_dbuf_state->ddb[pipe])) - return 0; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - /* - * Used for checking overlaps, so we need absolute - * offsets instead of MBUS relative offsets. 
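
skl_crtc_allocate_ddb() then carves each pipe's share out of the shared range proportionally to the weights (hdisplay) computed above. Inlining the weight bookkeeping from intel_crtc_dbuf_weights() gives the sketch below; with weights {1920, 3840, 1920} and a 1536-block range, pipe 1 ends up with blocks [384, 1152). The demo_* names are invented, and all pipes are assumed to share one slice set:

	#include <stdint.h>

	static void demo_ddb_split(const unsigned int *weights, int num_pipes,
				   int for_pipe, uint32_t range_size,
				   uint32_t *start, uint32_t *end)
	{
		unsigned int total = 0, before = 0;
		int pipe;

		for (pipe = 0; pipe < num_pipes; pipe++) {
			total += weights[pipe];
			if (pipe < for_pipe)
				before += weights[pipe];
		}

		*start = range_size * before / total;
		*end = range_size * (before + weights[for_pipe]) / total;
	}
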
- */ - crtc_state->wm.skl.ddb.start = mbus_offset + new_dbuf_state->ddb[pipe].start; - crtc_state->wm.skl.ddb.end = mbus_offset + new_dbuf_state->ddb[pipe].end; - - drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] dbuf slices 0x%x -> 0x%x, ddb (%d - %d) -> (%d - %d), active pipes 0x%x -> 0x%x\n", - crtc->base.base.id, crtc->base.name, - old_dbuf_state->slices[pipe], new_dbuf_state->slices[pipe], - old_dbuf_state->ddb[pipe].start, old_dbuf_state->ddb[pipe].end, - new_dbuf_state->ddb[pipe].start, new_dbuf_state->ddb[pipe].end, - old_dbuf_state->active_pipes, new_dbuf_state->active_pipes); - - return 0; -} - -static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, - int width, const struct drm_format_info *format, - u64 modifier, unsigned int rotation, - u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane); - -static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - int level, - unsigned int latency, - const struct skl_wm_params *wp, - const struct skl_wm_level *result_prev, - struct skl_wm_level *result /* out */); - -static unsigned int -skl_cursor_allocation(const struct intel_crtc_state *crtc_state, - int num_active) -{ - struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(dev_priv); - struct skl_wm_level wm = {}; - int ret, min_ddb_alloc = 0; - struct skl_wm_params wp; - - ret = skl_compute_wm_params(crtc_state, 256, - drm_format_info(DRM_FORMAT_ARGB8888), - DRM_FORMAT_MOD_LINEAR, - DRM_MODE_ROTATE_0, - crtc_state->pixel_rate, &wp, 0); - drm_WARN_ON(&dev_priv->drm, ret); - - for (level = 0; level <= max_level; level++) { - unsigned int latency = dev_priv->wm.skl_latency[level]; - - skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); - if (wm.min_ddb_alloc == U16_MAX) - break; - - min_ddb_alloc = wm.min_ddb_alloc; - } - - return max(num_active == 1 ? 
32 : 8, min_ddb_alloc); -} - -static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) -{ - skl_ddb_entry_init(entry, - REG_FIELD_GET(PLANE_BUF_START_MASK, reg), - REG_FIELD_GET(PLANE_BUF_END_MASK, reg)); - if (entry->end) - entry->end++; -} - -static void -skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, - const enum pipe pipe, - const enum plane_id plane_id, - struct skl_ddb_entry *ddb, - struct skl_ddb_entry *ddb_y) -{ - u32 val; - - /* Cursor doesn't support NV12/planar, so no extra calculation needed */ - if (plane_id == PLANE_CURSOR) { - val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe)); - skl_ddb_entry_init_from_hw(ddb, val); - return; - } - - val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); - skl_ddb_entry_init_from_hw(ddb, val); - - if (DISPLAY_VER(dev_priv) >= 11) - return; - - val = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); - skl_ddb_entry_init_from_hw(ddb_y, val); -} - -static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, - struct skl_ddb_entry *ddb, - struct skl_ddb_entry *ddb_y) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum intel_display_power_domain power_domain; - enum pipe pipe = crtc->pipe; - intel_wakeref_t wakeref; - enum plane_id plane_id; - - power_domain = POWER_DOMAIN_PIPE(pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return; - - for_each_plane_id_on_crtc(crtc, plane_id) - skl_ddb_get_hw_plane_state(dev_priv, pipe, - plane_id, - &ddb[plane_id], - &ddb_y[plane_id]); - - intel_display_power_put(dev_priv, power_domain, wakeref); -} - -struct dbuf_slice_conf_entry { - u8 active_pipes; - u8 dbuf_mask[I915_MAX_PIPES]; - bool join_mbus; -}; - -/* - * Table taken from Bspec 12716 - * Pipes do have some preferred DBuf slice affinity, - * plus there are some hardcoded requirements on how - * those should be distributed for multipipe scenarios. - * For more DBuf slices algorithm can get even more messy - * and less readable, so decided to use a table almost - * as is from BSpec itself - that way it is at least easier - * to compare, change and check. - */ -static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = -/* Autogenerated with igt/tools/intel_dbuf_map tool: */ -{ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - {} -}; - -/* - * Table taken from Bspec 49255 - * Pipes do have some preferred DBuf slice affinity, - * plus there are some hardcoded requirements on how - * those should be distributed for multipipe scenarios. 
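
Reading an allocation back from hardware, as skl_ddb_entry_init_from_hw() does at the top of this hunk, is a matter of extracting two bit fields and converting the register's inclusive end to the driver's exclusive convention. The field positions below are assumptions for the sketch, not the actual PLANE_BUF_CFG layout:

	#include <stdint.h>

	#define DEMO_BUF_START_MASK	0x00000fffu	/* bits 11:0, assumed */
	#define DEMO_BUF_END_MASK	0x0fff0000u	/* bits 27:16, assumed */

	static void demo_ddb_entry_from_hw(uint32_t reg,
					   uint16_t *start, uint16_t *end)
	{
		*start = reg & DEMO_BUF_START_MASK;
		*end = (reg & DEMO_BUF_END_MASK) >> 16;

		/* hardware stores an inclusive end; convert to exclusive */
		if (*end)
			(*end)++;
	}
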
- * For more DBuf slices algorithm can get even more messy - * and less readable, so decided to use a table almost - * as is from BSpec itself - that way it is at least easier - * to compare, change and check. - */ -static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = -/* Autogenerated with igt/tools/intel_dbuf_map tool: */ -{ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S1), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S1), - [PIPE_C] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S2), - }, - }, - {} -}; - -static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = { - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - 
}, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1), - [PIPE_B] = BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3), - [PIPE_D] = BIT(DBUF_S4), - }, - }, - {} -}; - -static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = { - /* - * Keep the join_mbus cases first so check_mbus_joined() - * will prefer them over the !join_mbus cases. - */ - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = true, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = true, - }, - { - .active_pipes = BIT(PIPE_A), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - .join_mbus = false, - }, - { - .active_pipes = BIT(PIPE_B), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - .join_mbus = false, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_C), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - }, - }, - { - .active_pipes = BIT(PIPE_D), - .dbuf_mask = { - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = 
BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - { - .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .dbuf_mask = { - [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), - [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4), - [PIPE_D] = BIT(DBUF_S1) | BIT(DBUF_S2), - }, - }, - {} - -}; - -static bool check_mbus_joined(u8 active_pipes, - const struct dbuf_slice_conf_entry *dbuf_slices) -{ - int i; - - for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { - if (dbuf_slices[i].active_pipes == active_pipes) - return dbuf_slices[i].join_mbus; - } - return false; -} - -static bool adlp_check_mbus_joined(u8 active_pipes) -{ - return check_mbus_joined(active_pipes, adlp_allowed_dbufs); -} - -static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus, - const struct dbuf_slice_conf_entry *dbuf_slices) -{ - int i; - - for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { - if (dbuf_slices[i].active_pipes == active_pipes && - dbuf_slices[i].join_mbus == join_mbus) - return dbuf_slices[i].dbuf_mask[pipe]; - } - return 0; -} - -/* - * This function finds an entry with same enabled pipe configuration and - * returns correspondent DBuf slice mask as stated in BSpec for particular - * platform. - */ -static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - /* - * FIXME: For ICL this is still a bit unclear as prev BSpec revision - * required calculating "pipe ratio" in order to determine - * if one or two slices can be used for single pipe configurations - * as additional constraint to the existing table. - * However based on recent info, it should be not "pipe ratio" - * but rather ratio between pixel_rate and cdclk with additional - * constants, so for now we are using only table until this is - * clarified. Also this is the reason why crtc_state param is - * still here - we will need it once those additional constraints - * pop up. 
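
check_mbus_joined() and compute_dbuf_slices() above treat these tables as a simple association list keyed on the active-pipe mask (plus, on ADL-P, the join_mbus flag): the first matching entry wins, and a configuration missing from the table yields an empty slice mask. A reduced sketch of the lookup:

	#include <stdbool.h>
	#include <stdint.h>

	/* The shape of the BSpec tables above, reduced to the essentials. */
	struct demo_dbuf_entry {
		uint8_t active_pipes;		/* key: mask of enabled pipes */
		uint8_t dbuf_mask[4];		/* value: slice mask per pipe */
		bool join_mbus;
	};

	static uint8_t demo_lookup_dbuf_slices(const struct demo_dbuf_entry *table,
					       int pipe, uint8_t active_pipes,
					       bool join_mbus)
	{
		int i;

		/* zero-terminated table, first match wins */
		for (i = 0; table[i].active_pipes != 0; i++) {
			if (table[i].active_pipes == active_pipes &&
			    table[i].join_mbus == join_mbus)
				return table[i].dbuf_mask[pipe];
		}
		return 0;
	}
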
- */ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - icl_allowed_dbufs); -} - -static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - tgl_allowed_dbufs); -} - -static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - adlp_allowed_dbufs); -} - -static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus) -{ - return compute_dbuf_slices(pipe, active_pipes, join_mbus, - dg2_allowed_dbufs); -} - -static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - if (IS_DG2(dev_priv)) - return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (IS_ALDERLAKE_P(dev_priv)) - return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(dev_priv) == 12) - return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus); - else if (DISPLAY_VER(dev_priv) == 11) - return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus); - /* - * For anything else just return one slice yet. - * Should be extended for other platforms. - */ - return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0; -} - -static bool -use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - - return DISPLAY_VER(i915) >= 13 && - crtc_state->uapi.async_flip && - plane->async_flip; -} - -static u64 -skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - enum plane_id plane_id; - u64 data_rate = 0; - - for_each_plane_id_on_crtc(crtc, plane_id) { - if (plane_id == PLANE_CURSOR) - continue; - - data_rate += crtc_state->rel_data_rate[plane_id]; - - if (DISPLAY_VER(i915) < 11) - data_rate += crtc_state->rel_data_rate_y[plane_id]; - } - - return data_rate; -} - -static const struct skl_wm_level * -skl_plane_wm_level(const struct skl_pipe_wm *pipe_wm, - enum plane_id plane_id, - int level) -{ - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - if (level == 0 && pipe_wm->use_sagv_wm) - return &wm->sagv.wm0; - - return &wm->wm[level]; -} - -static const struct skl_wm_level * -skl_plane_trans_wm(const struct skl_pipe_wm *pipe_wm, - enum plane_id plane_id) -{ - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - if (pipe_wm->use_sagv_wm) - return &wm->sagv.trans_wm; - - return &wm->trans_wm; -} - -/* - * We only disable the watermarks for each plane if - * they exceed the ddb allocation of said plane. This - * is done so that we don't end up touching cursor - * watermarks needlessly when some other plane reduces - * our max possible watermark level. - * - * Bspec has this to say about the PLANE_WM enable bit: - * "All the watermarks at this level for all enabled - * planes must be enabled before the level will be used." - * So this is actually safe to do. 
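
Concretely, the per-level disable described in that comment is a single comparison against the plane's DDB allocation, as skl_check_wm_level() below does; zeroing the whole level also clears its enable bit. A sketch with an invented level struct:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct demo_wm_level {
		bool enable;
		uint16_t blocks, lines, min_ddb_alloc;
	};

	/*
	 * A level whose minimum DDB requirement exceeds the plane's
	 * actual allocation is zeroed wholesale.
	 */
	static void demo_check_wm_level(struct demo_wm_level *wm,
					uint16_t ddb_size)
	{
		if (wm->min_ddb_alloc > ddb_size)
			memset(wm, 0, sizeof(*wm));
	}
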
- */ -static void -skl_check_wm_level(struct skl_wm_level *wm, const struct skl_ddb_entry *ddb) -{ - if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) - memset(wm, 0, sizeof(*wm)); -} - -static void -skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, - const struct skl_ddb_entry *ddb_y, const struct skl_ddb_entry *ddb) -{ - if (wm->min_ddb_alloc > skl_ddb_entry_size(ddb_y) || - uv_wm->min_ddb_alloc > skl_ddb_entry_size(ddb)) { - memset(wm, 0, sizeof(*wm)); - memset(uv_wm, 0, sizeof(*uv_wm)); - } -} - -static bool icl_need_wm1_wa(struct drm_i915_private *i915, - enum plane_id plane_id) -{ - /* - * Wa_1408961008:icl, ehl - * Wa_14012656716:tgl, adl - * Underruns with WM1+ disabled - */ - return DISPLAY_VER(i915) == 11 || - (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); -} - -struct skl_plane_ddb_iter { - u64 data_rate; - u16 start, size; -}; - -static void -skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, - struct skl_ddb_entry *ddb, - const struct skl_wm_level *wm, - u64 data_rate) -{ - u16 size, extra = 0; - - if (data_rate) { - extra = min_t(u16, iter->size, - DIV64_U64_ROUND_UP(iter->size * data_rate, - iter->data_rate)); - iter->size -= extra; - iter->data_rate -= data_rate; - } - - /* - * Keep ddb entry of all disabled planes explicitly zeroed - * to avoid skl_ddb_add_affected_planes() adding them to - * the state when other planes change their allocations. - */ - size = wm->min_ddb_alloc + extra; - if (size) - iter->start = skl_ddb_entry_init(ddb, iter->start, - iter->start + size); -} - -static int -skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_dbuf_state *dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe]; - int num_active = hweight8(dbuf_state->active_pipes); - struct skl_plane_ddb_iter iter; - enum plane_id plane_id; - u16 cursor_size; - u32 blocks; - int level; - - /* Clear the partitioning for disabled planes. */ - memset(crtc_state->wm.skl.plane_ddb, 0, sizeof(crtc_state->wm.skl.plane_ddb)); - memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); - - if (!crtc_state->hw.active) - return 0; - - iter.start = alloc->start; - iter.size = skl_ddb_entry_size(alloc); - if (iter.size == 0) - return 0; - - /* Allocate fixed number of blocks for cursor. */ - cursor_size = skl_cursor_allocation(crtc_state, num_active); - iter.size -= cursor_size; - skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR], - alloc->end - cursor_size, alloc->end); - - iter.data_rate = skl_total_relative_data_rate(crtc_state); - - /* - * Find the highest watermark level for which we can satisfy the block - * requirement of active planes. 
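
The proportional hand-out in skl_allocate_plane_ddb() above is worth seeing without the surrounding state: each plane receives its hard minimum plus a share of the spare blocks scaled by its relative data rate, and consuming both running totals keeps rounding error from over-committing the buffer. A sketch with invented demo_* types:

	#include <stdint.h>

	#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	struct demo_ddb_iter {
		uint64_t data_rate;	/* remaining total relative data rate */
		uint16_t size;		/* remaining spare blocks */
	};

	/* Returns the plane's total allocation in blocks. */
	static uint16_t demo_plane_ddb_size(struct demo_ddb_iter *iter,
					    uint16_t min_ddb_alloc,
					    uint64_t data_rate)
	{
		uint16_t extra = 0;

		if (data_rate) {
			uint64_t share = DEMO_DIV_ROUND_UP(
					(uint64_t)iter->size * data_rate,
					iter->data_rate);

			extra = share < iter->size ? (uint16_t)share : iter->size;
			iter->size -= extra;
			iter->data_rate -= data_rate;
		}

		return min_ddb_alloc + extra;
	}
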
- */ - for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { - blocks = 0; - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (plane_id == PLANE_CURSOR) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - - if (wm->wm[level].min_ddb_alloc > skl_ddb_entry_size(ddb)) { - drm_WARN_ON(&dev_priv->drm, - wm->wm[level].min_ddb_alloc != U16_MAX); - blocks = U32_MAX; - break; - } - continue; - } - - blocks += wm->wm[level].min_ddb_alloc; - blocks += wm->uv_wm[level].min_ddb_alloc; - } - - if (blocks <= iter.size) { - iter.size -= blocks; - break; - } - } - - if (level < 0) { - drm_dbg_kms(&dev_priv->drm, - "Requested display configuration exceeds system DDB limitations"); - drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n", - blocks, iter.size); - return -EINVAL; - } - - /* avoid the WARN later when we don't allocate any extra DDB */ - if (iter.data_rate == 0) - iter.size = 0; - - /* - * Grant each plane the blocks it requires at the highest achievable - * watermark level, plus an extra share of the leftover blocks - * proportional to its relative data rate. - */ - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - const struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (plane_id == PLANE_CURSOR) - continue; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) { - skl_allocate_plane_ddb(&iter, ddb_y, &wm->wm[level], - crtc_state->rel_data_rate_y[plane_id]); - skl_allocate_plane_ddb(&iter, ddb, &wm->uv_wm[level], - crtc_state->rel_data_rate[plane_id]); - } else { - skl_allocate_plane_ddb(&iter, ddb, &wm->wm[level], - crtc_state->rel_data_rate[plane_id]); - } - } - drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0); - - /* - * When we calculated watermark values we didn't know how high - * of a level we'd actually be able to hit, so we just marked - * all levels as "enabled." Go back now and disable the ones - * that aren't actually possible. - */ - for (level++; level <= ilk_wm_max_level(dev_priv); level++) { - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) - skl_check_nv12_wm_level(&wm->wm[level], - &wm->uv_wm[level], - ddb_y, ddb); - else - skl_check_wm_level(&wm->wm[level], ddb); - - if (icl_need_wm1_wa(dev_priv, plane_id) && - level == 1 && wm->wm[0].enable) { - wm->wm[level].blocks = wm->wm[0].blocks; - wm->wm[level].lines = wm->wm[0].lines; - wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; - } - } - } - - /* - * Go back and disable the transition and SAGV watermarks - * if it turns out we don't have enough DDB blocks for them. 
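
The level search above is a straightforward downward scan: sum every plane's min_ddb_alloc at a level and keep descending until the total fits the pipe's allocation. Stripped of the cursor special case:

	#include <stdint.h>

	#define DEMO_NUM_LEVELS 8

	/*
	 * Highest watermark level whose summed DDB requirement fits the
	 * pipe's allocation; -1 corresponds to the -EINVAL path above,
	 * where even level 0 does not fit.
	 */
	static int demo_highest_fitting_level(const uint16_t min_alloc[][DEMO_NUM_LEVELS],
					      int num_planes, int max_level,
					      uint32_t ddb_size)
	{
		int level, plane;

		for (level = max_level; level >= 0; level--) {
			uint32_t blocks = 0;

			for (plane = 0; plane < num_planes; plane++)
				blocks += min_alloc[plane][level];

			if (blocks <= ddb_size)
				return level;
		}
		return -1;
	}
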
- */ - for_each_plane_id_on_crtc(crtc, plane_id) { - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - struct skl_plane_wm *wm = - &crtc_state->wm.skl.optimal.planes[plane_id]; - - if (DISPLAY_VER(dev_priv) < 11 && - crtc_state->nv12_planes & BIT(plane_id)) { - skl_check_wm_level(&wm->trans_wm, ddb_y); - } else { - WARN_ON(skl_ddb_entry_size(ddb_y)); - - skl_check_wm_level(&wm->trans_wm, ddb); - } - - skl_check_wm_level(&wm->sagv.wm0, ddb); - skl_check_wm_level(&wm->sagv.trans_wm, ddb); - } - - return 0; -} - -/* - * The max latency should be 257 (max the punit can code is 255 and we add 2us - * for the read latency) and cpp should always be <= 8, so that - * should allow pixel_rate up to ~2 GHz which seems sufficient since max - * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. -*/ -static uint_fixed_16_16_t -skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate, - u8 cpp, u32 latency, u32 dbuf_block_size) -{ - u32 wm_intermediate_val; - uint_fixed_16_16_t ret; - - if (latency == 0) - return FP_16_16_MAX; - - wm_intermediate_val = latency * pixel_rate * cpp; - ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size); - - if (DISPLAY_VER(dev_priv) >= 10) - ret = add_fixed16_u32(ret, 1); - - return ret; -} - -static uint_fixed_16_16_t -skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, - uint_fixed_16_16_t plane_blocks_per_line) -{ - u32 wm_intermediate_val; - uint_fixed_16_16_t ret; - - if (latency == 0) - return FP_16_16_MAX; - - wm_intermediate_val = latency * pixel_rate; - wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, - pipe_htotal * 1000); - ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line); - return ret; -} - -static uint_fixed_16_16_t -intel_get_linetime_us(const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - u32 pixel_rate; - u32 crtc_htotal; - uint_fixed_16_16_t linetime_us; - - if (!crtc_state->hw.active) - return u32_to_fixed16(0); - - pixel_rate = crtc_state->pixel_rate; - - if (drm_WARN_ON(&dev_priv->drm, pixel_rate == 0)) - return u32_to_fixed16(0); - - crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal; - linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); - - return linetime_us; -} - -static int -skl_compute_wm_params(const struct intel_crtc_state *crtc_state, - int width, const struct drm_format_info *format, - u64 modifier, unsigned int rotation, - u32 plane_pixel_rate, struct skl_wm_params *wp, - int color_plane) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 interm_pbpl; - - /* only planar format has two planes */ - if (color_plane == 1 && - !intel_format_info_is_yuv_semiplanar(format, modifier)) { - drm_dbg_kms(&dev_priv->drm, - "Non planar format have single plane\n"); - return -EINVAL; - } - - wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || - modifier == I915_FORMAT_MOD_4_TILED || - modifier == I915_FORMAT_MOD_Yf_TILED || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; - wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier); - - wp->width = width; - if (color_plane == 1 && wp->is_planar) - wp->width /= 
2; - - wp->cpp = format->cpp[color_plane]; - wp->plane_pixel_rate = plane_pixel_rate; - - if (DISPLAY_VER(dev_priv) >= 11 && - modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) - wp->dbuf_block_size = 256; - else - wp->dbuf_block_size = 512; - - if (drm_rotation_90_or_270(rotation)) { - switch (wp->cpp) { - case 1: - wp->y_min_scanlines = 16; - break; - case 2: - wp->y_min_scanlines = 8; - break; - case 4: - wp->y_min_scanlines = 4; - break; - default: - MISSING_CASE(wp->cpp); - return -EINVAL; - } - } else { - wp->y_min_scanlines = 4; - } - - if (skl_needs_memory_bw_wa(dev_priv)) - wp->y_min_scanlines *= 2; - - wp->plane_bytes_per_line = wp->width * wp->cpp; - if (wp->y_tiled) { - interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * - wp->y_min_scanlines, - wp->dbuf_block_size); - - if (DISPLAY_VER(dev_priv) >= 10) - interm_pbpl++; - - wp->plane_blocks_per_line = div_fixed16(interm_pbpl, - wp->y_min_scanlines); - } else { - interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, - wp->dbuf_block_size); - - if (!wp->x_tiled || DISPLAY_VER(dev_priv) >= 10) - interm_pbpl++; - - wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); - } - - wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, - wp->plane_blocks_per_line); - - wp->linetime_us = fixed16_to_u32_round_up( - intel_get_linetime_us(crtc_state)); - - return 0; -} - -static int -skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct skl_wm_params *wp, int color_plane) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - int width; - - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - width = drm_rect_width(&plane_state->uapi.src) >> 16; - - return skl_compute_wm_params(crtc_state, width, - fb->format, fb->modifier, - plane_state->hw.rotation, - intel_plane_pixel_rate(crtc_state, plane_state), - wp, color_plane); -} - -static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) -{ - if (DISPLAY_VER(dev_priv) >= 10) - return true; - - /* The number of lines are ignored for the level 0 watermark. 
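
For the two candidate formulas from skl_wm_method1/2() above, the units are easiest to see in double precision rather than the driver's 16.16 fixed point: pixel_rate is in kHz and latency in microseconds, so the factor of 1000 converts the product back to pixels. This is a sketch only, omitting the +1 block adjustment applied on newer platforms:

	#include <math.h>

	/* Blocks needed to hide the latency, from plane bandwidth alone. */
	static double demo_method1(double pixel_rate_khz, int cpp,
				   double latency_us, int dbuf_block_size)
	{
		return latency_us * pixel_rate_khz * cpp /
		       (1000.0 * dbuf_block_size);
	}

	/* Lines of latency, rounded up, times the DDB cost of one line. */
	static double demo_method2(double pixel_rate_khz, int htotal,
				   double latency_us, double blocks_per_line)
	{
		return ceil(latency_us * pixel_rate_khz / (htotal * 1000.0)) *
		       blocks_per_line;
	}
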
*/ - return level > 0; -} - -static int skl_wm_max_lines(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 13) - return 255; - else - return 31; -} - -static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - int level, - unsigned int latency, - const struct skl_wm_params *wp, - const struct skl_wm_level *result_prev, - struct skl_wm_level *result /* out */) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - uint_fixed_16_16_t method1, method2; - uint_fixed_16_16_t selected_result; - u32 blocks, lines, min_ddb_alloc = 0; - - if (latency == 0 || - (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { - /* reject it */ - result->min_ddb_alloc = U16_MAX; - return; - } - - /* - * WaIncreaseLatencyIPCEnabled: kbl,cfl - * Display WA #1141: kbl,cfl - */ - if ((IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) && - dev_priv->ipc_enabled) - latency += 4; - - if (skl_needs_memory_bw_wa(dev_priv) && wp->x_tiled) - latency += 15; - - method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, - wp->cpp, latency, wp->dbuf_block_size); - method2 = skl_wm_method2(wp->plane_pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - latency, - wp->plane_blocks_per_line); - - if (wp->y_tiled) { - selected_result = max_fixed16(method2, wp->y_tile_minimum); - } else { - if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal / - wp->dbuf_block_size < 1) && - (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { - selected_result = method2; - } else if (latency >= wp->linetime_us) { - if (DISPLAY_VER(dev_priv) == 9) - selected_result = min_fixed16(method1, method2); - else - selected_result = method2; - } else { - selected_result = method1; - } - } - - blocks = fixed16_to_u32_round_up(selected_result) + 1; - /* - * Lets have blocks at minimum equivalent to plane_blocks_per_line - * as there will be at minimum one line for lines configuration. This - * is a work around for FIFO underruns observed with resolutions like - * 4k 60 Hz in single channel DRAM configurations. - * - * As per the Bspec 49325, if the ddb allocation can hold at least - * one plane_blocks_per_line, we should have selected method2 in - * the above logic. Assuming that modern versions have enough dbuf - * and method2 guarantees blocks equivalent to at least 1 line, - * select the blocks as plane_blocks_per_line. - * - * TODO: Revisit the logic when we have better understanding on DRAM - * channels' impact on the level 0 memory latency and the relevant - * wm calculations. - */ - if (skl_wm_has_lines(dev_priv, level)) - blocks = max(blocks, - fixed16_to_u32_round_up(wp->plane_blocks_per_line)); - lines = div_round_up_fixed16(selected_result, - wp->plane_blocks_per_line); - - if (DISPLAY_VER(dev_priv) == 9) { - /* Display WA #1125: skl,bxt,kbl */ - if (level == 0 && wp->rc_surface) - blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); - - /* Display WA #1126: skl,bxt,kbl */ - if (level >= 1 && level <= 7) { - if (wp->y_tiled) { - blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); - lines += wp->y_min_scanlines; - } else { - blocks++; - } - - /* - * Make sure result blocks for higher latency levels are - * atleast as high as level below the current level. - * Assumption in DDB algorithm optimization for special - * cases. Also covers Display WA #1125 for RC. 
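
The choice between those candidates in skl_compute_plane_wm() collapses to a small decision tree; in the sketch below the two "plane buffer smaller than one line" tests from the code above are folded into a single fits_under_one_line flag:

	#include <stdbool.h>

	static double demo_selected_result(bool y_tiled, bool fits_under_one_line,
					   bool gen9, double latency_us,
					   double linetime_us, double method1,
					   double method2, double y_tile_minimum)
	{
		if (y_tiled)
			return method2 > y_tile_minimum ? method2 : y_tile_minimum;
		if (fits_under_one_line)
			return method2;
		if (latency_us >= linetime_us)
			return gen9 ? (method1 < method2 ? method1 : method2)
				    : method2;
		return method1;
	}
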
- */ - if (result_prev->blocks > blocks) - blocks = result_prev->blocks; - } - } - - if (DISPLAY_VER(dev_priv) >= 11) { - if (wp->y_tiled) { - int extra_lines; - - if (lines % wp->y_min_scanlines == 0) - extra_lines = wp->y_min_scanlines; - else - extra_lines = wp->y_min_scanlines * 2 - - lines % wp->y_min_scanlines; - - min_ddb_alloc = mul_round_up_u32_fixed16(lines + extra_lines, - wp->plane_blocks_per_line); - } else { - min_ddb_alloc = blocks + DIV_ROUND_UP(blocks, 10); - } - } - - if (!skl_wm_has_lines(dev_priv, level)) - lines = 0; - - if (lines > skl_wm_max_lines(dev_priv)) { - /* reject it */ - result->min_ddb_alloc = U16_MAX; - return; - } - - /* - * If lines is valid, assume we can use this watermark level - * for now. We'll come back and disable it after we calculate the - * DDB allocation if it turns out we don't actually have enough - * blocks to satisfy it. - */ - result->blocks = blocks; - result->lines = lines; - /* Bspec says: value >= plane ddb allocation -> invalid, hence the +1 here */ - result->min_ddb_alloc = max(min_ddb_alloc, blocks) + 1; - result->enable = true; - - if (DISPLAY_VER(dev_priv) < 12 && dev_priv->sagv_block_time_us) - result->can_sagv = latency >= dev_priv->sagv_block_time_us; -} - -static void -skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - const struct skl_wm_params *wm_params, - struct skl_wm_level *levels) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(dev_priv); - struct skl_wm_level *result_prev = &levels[0]; - - for (level = 0; level <= max_level; level++) { - struct skl_wm_level *result = &levels[level]; - unsigned int latency = dev_priv->wm.skl_latency[level]; - - skl_compute_plane_wm(crtc_state, plane, level, latency, - wm_params, result_prev, result); - - result_prev = result; - } -} - -static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, - struct intel_plane *plane, - const struct skl_wm_params *wm_params, - struct skl_plane_wm *plane_wm) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct skl_wm_level *sagv_wm = &plane_wm->sagv.wm0; - struct skl_wm_level *levels = plane_wm->wm; - unsigned int latency = 0; - - if (dev_priv->sagv_block_time_us) - latency = dev_priv->sagv_block_time_us + dev_priv->wm.skl_latency[0]; - - skl_compute_plane_wm(crtc_state, plane, 0, latency, - wm_params, &levels[0], - sagv_wm); -} - -static void skl_compute_transition_wm(struct drm_i915_private *dev_priv, - struct skl_wm_level *trans_wm, - const struct skl_wm_level *wm0, - const struct skl_wm_params *wp) -{ - u16 trans_min, trans_amount, trans_y_tile_min; - u16 wm0_blocks, trans_offset, blocks; - - /* Transition WM don't make any sense if ipc is disabled */ - if (!dev_priv->ipc_enabled) - return; - - /* - * WaDisableTWM:skl,kbl,cfl,bxt - * Transition WM are not recommended by HW team for GEN9 - */ - if (DISPLAY_VER(dev_priv) == 9) - return; - - if (DISPLAY_VER(dev_priv) >= 11) - trans_min = 4; - else - trans_min = 14; - - /* Display WA #1140: glk,cnl */ - if (DISPLAY_VER(dev_priv) == 10) - trans_amount = 0; - else - trans_amount = 10; /* This is configurable amount */ - - trans_offset = trans_min + trans_amount; - - /* - * The spec asks for Selected Result Blocks for wm0 (the real value), - * not Result Blocks (the integer value). Pay attention to the capital - * letters. 
The value wm_l0->blocks is actually Result Blocks, but - * since Result Blocks is the ceiling of Selected Result Blocks plus 1, - * and since we later will have to get the ceiling of the sum in the - * transition watermarks calculation, we can just pretend Selected - * Result Blocks is Result Blocks minus 1 and it should work for the - * current platforms. - */ - wm0_blocks = wm0->blocks - 1; - - if (wp->y_tiled) { - trans_y_tile_min = - (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum); - blocks = max(wm0_blocks, trans_y_tile_min) + trans_offset; - } else { - blocks = wm0_blocks + trans_offset; - } - blocks++; - - /* - * Just assume we can enable the transition watermark. After - * computing the DDB we'll come back and disable it if that - * assumption turns out to be false. - */ - trans_wm->blocks = blocks; - trans_wm->min_ddb_alloc = max_t(u16, wm0->min_ddb_alloc, blocks + 1); - trans_wm->enable = true; -} - -static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct intel_plane *plane, int color_plane) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; - struct skl_wm_params wm_params; - int ret; - - ret = skl_compute_plane_wm_params(crtc_state, plane_state, - &wm_params, color_plane); - if (ret) - return ret; - - skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); - - skl_compute_transition_wm(dev_priv, &wm->trans_wm, - &wm->wm[0], &wm_params); - - if (DISPLAY_VER(dev_priv) >= 12) { - tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); - - skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm, - &wm->sagv.wm0, &wm_params); - } - - return 0; -} - -static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - struct intel_plane *plane) -{ - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; - struct skl_wm_params wm_params; - int ret; - - wm->is_planar = true; - - /* uv plane watermarks must also be validated for NV12/Planar */ - ret = skl_compute_plane_wm_params(crtc_state, plane_state, - &wm_params, 1); - if (ret) - return ret; - - skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); - - return 0; -} - -static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - enum plane_id plane_id = plane->id; - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; - const struct drm_framebuffer *fb = plane_state->hw.fb; - int ret; - - memset(wm, 0, sizeof(*wm)); - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 0); - if (ret) - return ret; - - if (fb->format->is_yuv && fb->format->num_planes > 1) { - ret = skl_build_plane_wm_uv(crtc_state, plane_state, - plane); - if (ret) - return ret; - } - - return 0; -} - -static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum plane_id plane_id = plane->id; - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; - int ret; - - /* Watermarks calculated in master */ - if 
(plane_state->planar_slave) - return 0; - - memset(wm, 0, sizeof(*wm)); - - if (plane_state->planar_linked_plane) { - const struct drm_framebuffer *fb = plane_state->hw.fb; - - drm_WARN_ON(&dev_priv->drm, - !intel_wm_plane_visible(crtc_state, plane_state)); - drm_WARN_ON(&dev_priv->drm, !fb->format->is_yuv || - fb->format->num_planes == 1); - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_state->planar_linked_plane, 0); - if (ret) - return ret; - - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 1); - if (ret) - return ret; - } else if (intel_wm_plane_visible(crtc_state, plane_state)) { - ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane, 0); - if (ret) - return ret; - } - - return 0; -} - -static int skl_build_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *plane_state; - struct intel_plane *plane; - int ret, i; - - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - /* - * FIXME should perhaps check {old,new}_plane_crtc->hw.crtc - * instead but we don't populate that correctly for NV12 Y - * planes so for now hack this. - */ - if (plane->pipe != crtc->pipe) - continue; - - if (DISPLAY_VER(dev_priv) >= 11) - ret = icl_build_plane_wm(crtc_state, plane_state); - else - ret = skl_build_plane_wm(crtc_state, plane_state); - if (ret) - return ret; - } - - crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; - - return 0; -} - -static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_ddb_entry *entry) -{ - if (entry->end) - intel_de_write_fw(dev_priv, reg, - PLANE_BUF_END(entry->end - 1) | - PLANE_BUF_START(entry->start)); - else - intel_de_write_fw(dev_priv, reg, 0); -} - -static void skl_write_wm_level(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const struct skl_wm_level *level) -{ - u32 val = 0; - - if (level->enable) - val |= PLANE_WM_EN; - if (level->ignore_lines) - val |= PLANE_WM_IGNORE_LINES; - val |= REG_FIELD_PREP(PLANE_WM_BLOCKS_MASK, level->blocks); - val |= REG_FIELD_PREP(PLANE_WM_LINES_MASK, level->lines); - - intel_de_write_fw(dev_priv, reg, val); -} - -void skl_write_plane_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - const struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - - for (level = 0; level <= max_level; level++) - skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), - skl_plane_wm_level(pipe_wm, plane_id, level)); - - skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), - skl_plane_trans_wm(pipe_wm, plane_id)); - - if (HAS_HW_SAGV_WM(dev_priv)) { - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - skl_write_wm_level(dev_priv, PLANE_WM_SAGV(pipe, plane_id), - &wm->sagv.wm0); - skl_write_wm_level(dev_priv, PLANE_WM_SAGV_TRANS(pipe, plane_id), - &wm->sagv.trans_wm); - } - - skl_ddb_entry_write(dev_priv, - PLANE_BUF_CFG(pipe, plane_id), ddb); - - if (DISPLAY_VER(dev_priv) < 11) - skl_ddb_entry_write(dev_priv, - 
PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_y); -} - -void skl_write_cursor_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - const struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - - for (level = 0; level <= max_level; level++) - skl_write_wm_level(dev_priv, CUR_WM(pipe, level), - skl_plane_wm_level(pipe_wm, plane_id, level)); - - skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), - skl_plane_trans_wm(pipe_wm, plane_id)); - - if (HAS_HW_SAGV_WM(dev_priv)) { - const struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; - - skl_write_wm_level(dev_priv, CUR_WM_SAGV(pipe), - &wm->sagv.wm0); - skl_write_wm_level(dev_priv, CUR_WM_SAGV_TRANS(pipe), - &wm->sagv.trans_wm); - } - - skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb); -} - -static bool skl_wm_level_equals(const struct skl_wm_level *l1, - const struct skl_wm_level *l2) -{ - return l1->enable == l2->enable && - l1->ignore_lines == l2->ignore_lines && - l1->lines == l2->lines && - l1->blocks == l2->blocks; -} - -static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, - const struct skl_plane_wm *wm1, - const struct skl_plane_wm *wm2) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - - for (level = 0; level <= max_level; level++) { - /* - * We don't check uv_wm as the hardware doesn't actually - * use it. It only gets used for calculating the required - * ddb allocation. - */ - if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) - return false; - } - - return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm) && - skl_wm_level_equals(&wm1->sagv.wm0, &wm2->sagv.wm0) && - skl_wm_level_equals(&wm1->sagv.trans_wm, &wm2->sagv.trans_wm); -} - -static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, - const struct skl_ddb_entry *b) -{ - return a->start < b->end && b->start < a->end; -} - -static void skl_ddb_entry_union(struct skl_ddb_entry *a, - const struct skl_ddb_entry *b) -{ - if (a->end && b->end) { - a->start = min(a->start, b->start); - a->end = max(a->end, b->end); - } else if (b->end) { - a->start = b->start; - a->end = b->end; - } -} - -bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, - const struct skl_ddb_entry *entries, - int num_entries, int ignore_idx) -{ - int i; - - for (i = 0; i < num_entries; i++) { - if (i != ignore_idx && - skl_ddb_entries_overlap(ddb, &entries[i])) - return true; - } - - return false; -} - -static int -skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *new_crtc_state) -{ - struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->uapi.state); - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - struct intel_plane_state *plane_state; - enum plane_id plane_id = plane->id; - - if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb[plane_id], - &new_crtc_state->wm.skl.plane_ddb[plane_id]) && - skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id], - &new_crtc_state->wm.skl.plane_ddb_y[plane_id])) - continue; - - plane_state = intel_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return 
PTR_ERR(plane_state); - - new_crtc_state->update_planes |= BIT(plane_id); - } - - return 0; -} - -static u8 intel_dbuf_enabled_slices(const struct intel_dbuf_state *dbuf_state) -{ - struct drm_i915_private *dev_priv = to_i915(dbuf_state->base.state->base.dev); - u8 enabled_slices; - enum pipe pipe; - - /* - * FIXME: For now we always enable slice S1 as per - * the Bspec display initialization sequence. - */ - enabled_slices = BIT(DBUF_S1); - - for_each_pipe(dev_priv, pipe) - enabled_slices |= dbuf_state->slices[pipe]; - - return enabled_slices; -} - -static int -skl_compute_ddb(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *old_dbuf_state; - struct intel_dbuf_state *new_dbuf_state = NULL; - const struct intel_crtc_state *old_crtc_state; - struct intel_crtc_state *new_crtc_state; - struct intel_crtc *crtc; - int ret, i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - new_dbuf_state = intel_atomic_get_dbuf_state(state); - if (IS_ERR(new_dbuf_state)) - return PTR_ERR(new_dbuf_state); - - old_dbuf_state = intel_atomic_get_old_dbuf_state(state); - break; - } - - if (!new_dbuf_state) - return 0; - - new_dbuf_state->active_pipes = - intel_calc_active_pipes(state, old_dbuf_state->active_pipes); - - if (old_dbuf_state->active_pipes != new_dbuf_state->active_pipes) { - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - if (HAS_MBUS_JOINING(dev_priv)) - new_dbuf_state->joined_mbus = - adlp_check_mbus_joined(new_dbuf_state->active_pipes); - - for_each_intel_crtc(&dev_priv->drm, crtc) { - enum pipe pipe = crtc->pipe; - - new_dbuf_state->slices[pipe] = - skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes, - new_dbuf_state->joined_mbus); - - if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe]) - continue; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state); - - if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices || - old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { - ret = intel_atomic_serialize_global_state(&new_dbuf_state->base); - if (ret) - return ret; - - if (old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) { - /* TODO: Implement vblank synchronized MBUS joining changes */ - ret = intel_modeset_all_pipes(state); - if (ret) - return ret; - } - - drm_dbg_kms(&dev_priv->drm, - "Enabled dbuf slices 0x%x -> 0x%x (total dbuf slices 0x%x), mbus joined? 
%s->%s\n", - old_dbuf_state->enabled_slices, - new_dbuf_state->enabled_slices, - INTEL_INFO(dev_priv)->display.dbuf.slice_mask, - str_yes_no(old_dbuf_state->joined_mbus), - str_yes_no(new_dbuf_state->joined_mbus)); - } - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - enum pipe pipe = crtc->pipe; - - new_dbuf_state->weight[pipe] = intel_crtc_ddb_weight(new_crtc_state); - - if (old_dbuf_state->weight[pipe] == new_dbuf_state->weight[pipe]) - continue; - - ret = intel_atomic_lock_global_state(&new_dbuf_state->base); - if (ret) - return ret; - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - ret = skl_crtc_allocate_ddb(state, crtc); - if (ret) - return ret; - } - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - ret = skl_crtc_allocate_plane_ddb(state, crtc); - if (ret) - return ret; - - ret = skl_ddb_add_affected_planes(old_crtc_state, - new_crtc_state); - if (ret) - return ret; - } - - return 0; -} - -static char enast(bool enable) -{ - return enable ? '*' : ' '; -} - -static void -skl_print_wm_changes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_crtc_state *old_crtc_state; - const struct intel_crtc_state *new_crtc_state; - struct intel_plane *plane; - struct intel_crtc *crtc; - int i; - - if (!drm_debug_enabled(DRM_UT_KMS)) - return; - - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; - - old_pipe_wm = &old_crtc_state->wm.skl.optimal; - new_pipe_wm = &new_crtc_state->wm.skl.optimal; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - enum plane_id plane_id = plane->id; - const struct skl_ddb_entry *old, *new; - - old = &old_crtc_state->wm.skl.plane_ddb[plane_id]; - new = &new_crtc_state->wm.skl.plane_ddb[plane_id]; - - if (skl_ddb_entry_equal(old, new)) - continue; - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", - plane->base.base.id, plane->base.name, - old->start, old->end, new->start, new->end, - skl_ddb_entry_size(old), skl_ddb_entry_size(new)); - } - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - enum plane_id plane_id = plane->id; - const struct skl_plane_wm *old_wm, *new_wm; - - old_wm = &old_pipe_wm->planes[plane_id]; - new_wm = &new_pipe_wm->planes[plane_id]; - - if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) - continue; - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), - enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), - enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), - enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), - enast(old_wm->trans_wm.enable), - enast(old_wm->sagv.wm0.enable), - enast(old_wm->sagv.trans_wm.enable), - enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), - enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), - enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), - enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), - enast(new_wm->trans_wm.enable), - enast(new_wm->sagv.wm0.enable), - enast(new_wm->sagv.trans_wm.enable)); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" 
- " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, - enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, - enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, - enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, - enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, - enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].blocks, old_wm->wm[1].blocks, - old_wm->wm[2].blocks, old_wm->wm[3].blocks, - old_wm->wm[4].blocks, old_wm->wm[5].blocks, - old_wm->wm[6].blocks, old_wm->wm[7].blocks, - old_wm->trans_wm.blocks, - old_wm->sagv.wm0.blocks, - old_wm->sagv.trans_wm.blocks, - new_wm->wm[0].blocks, new_wm->wm[1].blocks, - new_wm->wm[2].blocks, new_wm->wm[3].blocks, - new_wm->wm[4].blocks, new_wm->wm[5].blocks, - new_wm->wm[6].blocks, new_wm->wm[7].blocks, - new_wm->trans_wm.blocks, - new_wm->sagv.wm0.blocks, - new_wm->sagv.trans_wm.blocks); - - drm_dbg_kms(&dev_priv->drm, - "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, - old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, - old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, - old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, - old_wm->trans_wm.min_ddb_alloc, - old_wm->sagv.wm0.min_ddb_alloc, - old_wm->sagv.trans_wm.min_ddb_alloc, - new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, - new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, - new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, - new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, - new_wm->trans_wm.min_ddb_alloc, - new_wm->sagv.wm0.min_ddb_alloc, - new_wm->sagv.trans_wm.min_ddb_alloc); - } - } -} - -static bool skl_plane_selected_wm_equals(struct intel_plane *plane, - const struct skl_pipe_wm *old_pipe_wm, - const struct skl_pipe_wm *new_pipe_wm) -{ - struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); - - for (level = 0; level <= max_level; level++) { - /* - * We don't check uv_wm as the hardware doesn't actually - * use it. It only gets used for calculating the required - * ddb allocation. 
- */ - if (!skl_wm_level_equals(skl_plane_wm_level(old_pipe_wm, plane->id, level), - skl_plane_wm_level(new_pipe_wm, plane->id, level))) - return false; - } - - if (HAS_HW_SAGV_WM(i915)) { - const struct skl_plane_wm *old_wm = &old_pipe_wm->planes[plane->id]; - const struct skl_plane_wm *new_wm = &new_pipe_wm->planes[plane->id]; - - if (!skl_wm_level_equals(&old_wm->sagv.wm0, &new_wm->sagv.wm0) || - !skl_wm_level_equals(&old_wm->sagv.trans_wm, &new_wm->sagv.trans_wm)) - return false; - } - - return skl_wm_level_equals(skl_plane_trans_wm(old_pipe_wm, plane->id), - skl_plane_trans_wm(new_pipe_wm, plane->id)); -} - -/* - * To make sure the cursor watermark registers are always consistent - * with our computed state, the following scenario needs special - * treatment: - * - * 1. enable cursor - * 2. move cursor entirely offscreen - * 3. disable cursor - * - * Step 2. does call .disable_plane() but does not zero the watermarks - * (since we consider an offscreen cursor still active for the purposes - * of watermarks). Step 3. would not normally call .disable_plane() - * because the actual plane visibility isn't changing, and we don't - * deallocate the cursor ddb until the pipe gets disabled. So we must - * force step 3. to call .disable_plane() to update the watermark - * registers properly. - * - * Other planes do not suffer from this issue as their watermarks are - * calculated based on the actual plane visibility. The only time this - * can trigger for the other planes is during the initial readout as the - * default value of the watermark registers is not zero. - */ -static int skl_wm_add_affected_planes(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct intel_plane *plane; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - struct intel_plane_state *plane_state; - enum plane_id plane_id = plane->id; - - /* - * Force a full wm update for every plane on modeset. - * Required because the reset value of the wm registers - * is non-zero, whereas we want all disabled planes to - * have zero watermarks. So if we turn off the relevant - * power well the hardware state will go out of sync - * with the software state. - */ - if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) && - skl_plane_selected_wm_equals(plane, - &old_crtc_state->wm.skl.optimal, - &new_crtc_state->wm.skl.optimal)) - continue; - - plane_state = intel_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return PTR_ERR(plane_state); - - new_crtc_state->update_planes |= BIT(plane_id); - } - - return 0; -} - -static int -skl_compute_wm(struct intel_atomic_state *state) -{ - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - int ret, i; - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = skl_build_pipe_wm(state, crtc); - if (ret) - return ret; - } - - ret = skl_compute_ddb(state); - if (ret) - return ret; - - ret = intel_compute_sagv_mask(state); - if (ret) - return ret; - - /* - * skl_compute_ddb() will have adjusted the final watermarks - * based on how much ddb is available. Now we can actually - * check if the final watermarks changed. 
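The comment above captures the overall shape of skl_compute_wm(): compute ideal per-plane watermarks, let the DDB allocation pass disable whatever the allocated buffer cannot back, then diff old against new so that only planes whose final values changed get reprogrammed. As a toy model of that compute/limit/diff ordering -- the types and numbers below are invented for illustration and are not the i915 structures:

#include <stdbool.h>
#include <stdio.h>

struct wm { unsigned int blocks; bool enable; };

/* A level that does not fit in the allocated DDB gets disabled. */
static void apply_ddb_limit(struct wm *w, unsigned int ddb_blocks)
{
        if (w->blocks > ddb_blocks) {
                w->blocks = 0;
                w->enable = false;
        }
}

int main(void)
{
        struct wm old = { .blocks = 128, .enable = true };
        struct wm new = { .blocks = 160, .enable = true };

        apply_ddb_limit(&new, 144);     /* the DDB pass may shrink/disable levels */

        /* Only flag the plane for reprogramming if the final value changed. */
        bool update_needed = old.blocks != new.blocks || old.enable != new.enable;
        printf("update_needed = %d\n", update_needed);
        return 0;
}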
- */ - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - ret = skl_wm_add_affected_planes(state, crtc); - if (ret) - return ret; - } - - skl_print_wm_changes(state); - - return 0; -} - static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, struct intel_wm_config *config) { @@ -6458,10 +3583,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_optimize_watermarks(struct intel_atomic_state *state, @@ -6474,210 +3599,17 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, if (!crtc_state->wm.need_postvbl_update) return; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); -} - -static void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) -{ - level->enable = val & PLANE_WM_EN; - level->ignore_lines = val & PLANE_WM_IGNORE_LINES; - level->blocks = REG_FIELD_GET(PLANE_WM_BLOCKS_MASK, val); - level->lines = REG_FIELD_GET(PLANE_WM_LINES_MASK, val); -} - -static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, - struct skl_pipe_wm *out) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - int level, max_level; - enum plane_id plane_id; - u32 val; - - max_level = ilk_wm_max_level(dev_priv); - - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_plane_wm *wm = &out->planes[plane_id]; - - for (level = 0; level <= max_level; level++) { - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level)); - else - val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level)); - - skl_wm_level_from_reg_val(val, &wm->wm[level]); - } - - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe)); - - skl_wm_level_from_reg_val(val, &wm->trans_wm); - - if (HAS_HW_SAGV_WM(dev_priv)) { - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, - PLANE_WM_SAGV(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, - CUR_WM_SAGV(pipe)); - - skl_wm_level_from_reg_val(val, &wm->sagv.wm0); - - if (plane_id != PLANE_CURSOR) - val = intel_uncore_read(&dev_priv->uncore, - PLANE_WM_SAGV_TRANS(pipe, plane_id)); - else - val = intel_uncore_read(&dev_priv->uncore, - CUR_WM_SAGV_TRANS(pipe)); - - skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm); - } else if (DISPLAY_VER(dev_priv) >= 12) { - wm->sagv.wm0 = wm->wm[0]; - wm->sagv.trans_wm = wm->trans_wm; - } - } -} - -void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(dev_priv->dbuf.obj.state); - struct intel_crtc *crtc; - - if (HAS_MBUS_JOINING(dev_priv)) - dbuf_state->joined_mbus = intel_de_read(dev_priv, MBUS_CTL) & MBUS_JOIN; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - enum pipe pipe = crtc->pipe; - unsigned int mbus_offset; - enum plane_id plane_id; - u8 
slices; - - skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); - crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal; - - memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe])); - - for_each_plane_id_on_crtc(crtc, plane_id) { - struct skl_ddb_entry *ddb = - &crtc_state->wm.skl.plane_ddb[plane_id]; - struct skl_ddb_entry *ddb_y = - &crtc_state->wm.skl.plane_ddb_y[plane_id]; - - skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe, - plane_id, ddb, ddb_y); - - skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb); - skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_y); - } - - dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state); - - /* - * Used for checking overlaps, so we need absolute - * offsets instead of MBUS relative offsets. - */ - slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, - dbuf_state->joined_mbus); - mbus_offset = mbus_ddb_offset(dev_priv, slices); - crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start; - crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end; - - /* The slices actually used by the planes on the pipe */ - dbuf_state->slices[pipe] = - skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb); - - drm_dbg_kms(&dev_priv->drm, - "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n", - crtc->base.base.id, crtc->base.name, - dbuf_state->slices[pipe], dbuf_state->ddb[pipe].start, - dbuf_state->ddb[pipe].end, dbuf_state->active_pipes, - str_yes_no(dbuf_state->joined_mbus)); - } - - dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices; -} - -static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) -{ - const struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->dbuf.obj.state); - struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - struct intel_crtc *crtc; - - for_each_intel_crtc(&i915->drm, crtc) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - entries[crtc->pipe] = crtc_state->wm.skl.ddb; - } - - for_each_intel_crtc(&i915->drm, crtc) { - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - u8 slices; - - slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes, - dbuf_state->joined_mbus); - if (dbuf_state->slices[crtc->pipe] & ~slices) - return true; - - if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries, - I915_MAX_PIPES, crtc->pipe)) - return true; - } - - return false; -} - -void skl_wm_sanitize(struct drm_i915_private *i915) -{ - struct intel_crtc *crtc; - - /* - * On TGL/RKL (at least) the BIOS likes to assign the planes - * to the wrong DBUF slices. This will cause an infinite loop - * in skl_commit_modeset_enables() as it can't find a way to - * transition from the old bogus DBUF layout to the new - * proper DBUF layout without DBUF allocation overlaps between - * the planes (which cannot be allowed or else the hardware - * may hang). If we detect a bogus DBUF layout just turn off - * all the planes so that skl_commit_modeset_enables() can - * simply ignore them. 
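The misconfiguration check in skl_dbuf_is_misconfigured() above ultimately reduces to the classic half-open interval overlap test; skl_ddb_entries_overlap() earlier in this patch uses the same expression. A self-contained sketch of that test with invented struct and values, purely illustrative of the [start, end) semantics:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct skl_ddb_entry: [start, end) in blocks. */
struct ddb_entry { unsigned int start, end; };

/* Two half-open ranges overlap iff each one starts before the other ends. */
static bool ddb_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
{
        return a->start < b->end && b->start < a->end;
}

int main(void)
{
        struct ddb_entry pipe_a = { 0, 512 };
        struct ddb_entry pipe_b = { 512, 1024 };  /* abuts pipe_a, no overlap */
        struct ddb_entry bogus  = { 256, 768 };   /* BIOS-style overlapping entry */

        printf("a/b overlap: %d\n", ddb_overlap(&pipe_a, &pipe_b));   /* 0 */
        printf("a/bogus overlap: %d\n", ddb_overlap(&pipe_a, &bogus)); /* 1 */
        return 0;
}

Note the strict comparisons: entries that merely touch (end == start) are not counted as overlapping, which is why abutting per-pipe allocations are legal.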
- */ - if (!skl_dbuf_is_misconfigured(i915)) - return; - - drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n"); - - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - if (plane_state->uapi.visible) - intel_plane_disable_noatomic(crtc, plane); - - drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0); - - memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb)); - } + mutex_unlock(&dev_priv->display.wm.wm_mutex); } static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct ilk_wm_values *hw = &dev_priv->wm.hw; + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; @@ -6825,7 +3757,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct g4x_wm_values *wm = &dev_priv->wm.g4x; + struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; struct intel_crtc *crtc; g4x_read_wm_values(dev_priv, wm); @@ -6919,7 +3851,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) struct intel_plane *plane; struct intel_crtc *crtc; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = @@ -6967,12 +3899,12 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct vlv_wm_values *wm = &dev_priv->wm.vlv; + struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; struct intel_crtc *crtc; u32 val; @@ -7006,7 +3938,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); - dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; + dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5; } else { val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); if ((val & FORCE_DDR_HIGH_FREQ) == 0) @@ -7075,7 +4007,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) struct intel_plane *plane; struct intel_crtc *crtc; - mutex_lock(&dev_priv->wm.wm_mutex); + mutex_lock(&dev_priv->display.wm.wm_mutex); for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = @@ -7116,7 +4048,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->wm.wm_mutex); + mutex_unlock(&dev_priv->display.wm.wm_mutex); } /* @@ -7137,7 +4069,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct ilk_wm_values *hw = &dev_priv->wm.hw; + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; struct intel_crtc *crtc; ilk_init_lp_watermarks(dev_priv); @@ -7166,168 +4098,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); } -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state 
*new_crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_hw_state { - struct skl_ddb_entry ddb[I915_MAX_PLANES]; - struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; - struct skl_pipe_wm wm; - } *hw; - const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; - int level, max_level = ilk_wm_max_level(dev_priv); - struct intel_plane *plane; - u8 hw_enabled_slices; - - if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active) - return; - - hw = kzalloc(sizeof(*hw), GFP_KERNEL); - if (!hw) - return; - - skl_pipe_wm_get_hw_state(crtc, &hw->wm); - - skl_pipe_ddb_get_hw_state(crtc, hw->ddb, hw->ddb_y); - - hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); - - if (DISPLAY_VER(dev_priv) >= 11 && - hw_enabled_slices != dev_priv->dbuf.enabled_slices) - drm_err(&dev_priv->drm, - "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", - dev_priv->dbuf.enabled_slices, - hw_enabled_slices); - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - const struct skl_wm_level *hw_wm_level, *sw_wm_level; - - /* Watermarks */ - for (level = 0; level <= max_level; level++) { - hw_wm_level = &hw->wm.planes[plane->id].wm[level]; - sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); - - if (skl_wm_level_equals(hw_wm_level, sw_wm_level)) - continue; - - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, level, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].trans_wm; - sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id); - - if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].sagv.wm0; - sw_wm_level = &sw_wm->planes[plane->id].sagv.wm0; - - if (HAS_HW_SAGV_WM(dev_priv) && - !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in SAGV WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - hw_wm_level = &hw->wm.planes[plane->id].sagv.trans_wm; - sw_wm_level = &sw_wm->planes[plane->id].sagv.trans_wm; - - if (HAS_HW_SAGV_WM(dev_priv) && - !skl_wm_level_equals(hw_wm_level, sw_wm_level)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in SAGV trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - plane->base.base.id, plane->base.name, - sw_wm_level->enable, - sw_wm_level->blocks, - sw_wm_level->lines, - hw_wm_level->enable, - hw_wm_level->blocks, - hw_wm_level->lines); - } - - /* DDB */ - hw_ddb_entry = &hw->ddb[PLANE_CURSOR]; - sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb[PLANE_CURSOR]; - - if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - drm_err(&dev_priv->drm, - "[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n", - plane->base.base.id, plane->base.name, - sw_ddb_entry->start, sw_ddb_entry->end, - hw_ddb_entry->start, hw_ddb_entry->end); - } - } - - 
kfree(hw); -} - -void intel_enable_ipc(struct drm_i915_private *dev_priv) -{ - u32 val; - - if (!HAS_IPC(dev_priv)) - return; - - val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); - - if (dev_priv->ipc_enabled) - val |= DISP_IPC_ENABLE; - else - val &= ~DISP_IPC_ENABLE; - - intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); -} - -static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) -{ - /* Display WA #0477 WaDisableIPC: skl */ - if (IS_SKYLAKE(dev_priv)) - return false; - - /* Display WA #1141: SKL:all KBL:all CFL */ - if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) - return dev_priv->dram_info.symmetric_memory; - - return true; -} - -void intel_init_ipc(struct drm_i915_private *dev_priv) -{ - if (!HAS_IPC(dev_priv)) - return; - - dev_priv->ipc_enabled = intel_can_enable_ipc(dev_priv); - - intel_enable_ipc(dev_priv); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -7435,7 +4205,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe)); val |= TRANS_CHICKEN2_TIMING_OVERRIDE; val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; - if (dev_priv->vbt.fdi_rx_polarity_inverted) + if (dev_priv->display.vbt.fdi_rx_polarity_inverted) val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; @@ -7586,9 +4356,8 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) { - /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ - if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) + /* Wa_1409120013 */ + if (DISPLAY_VER(dev_priv) == 12) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); @@ -7965,7 +4734,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) OVCUNIT_CLOCK_GATE_DISABLE; if (IS_GM45(dev_priv)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate); + intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate); g4x_disable_trickle_feed(dev_priv); } @@ -7976,7 +4745,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); intel_uncore_write(uncore, RENCLK_GATE_D2, 0); - intel_uncore_write(uncore, DSPCLK_GATE_D, 0); + intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0); intel_uncore_write(uncore, RAMCLK_GATE_D, 0); intel_uncore_write16(uncore, DEUC, 0); intel_uncore_write(uncore, @@ -8168,18 +4937,14 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) } } -static const struct drm_i915_wm_disp_funcs skl_wm_funcs = { - .compute_global_watermarks = skl_compute_wm, -}; - -static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = { +static const struct intel_wm_funcs ilk_wm_funcs = { .compute_pipe_wm = ilk_compute_pipe_wm, .compute_intermediate_wm = ilk_compute_intermediate_wm, .initial_watermarks = ilk_initial_watermarks, .optimize_watermarks = ilk_optimize_watermarks, }; -static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = { +static const struct intel_wm_funcs vlv_wm_funcs = { .compute_pipe_wm = vlv_compute_pipe_wm, .compute_intermediate_wm = vlv_compute_intermediate_wm, .initial_watermarks = vlv_initial_watermarks, @@ -8187,67 
+4952,67 @@ static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = { .atomic_update_watermarks = vlv_atomic_update_fifo, }; -static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = { +static const struct intel_wm_funcs g4x_wm_funcs = { .compute_pipe_wm = g4x_compute_pipe_wm, .compute_intermediate_wm = g4x_compute_intermediate_wm, .initial_watermarks = g4x_initial_watermarks, .optimize_watermarks = g4x_optimize_watermarks, }; -static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = { +static const struct intel_wm_funcs pnv_wm_funcs = { .update_wm = pnv_update_wm, }; -static const struct drm_i915_wm_disp_funcs i965_wm_funcs = { +static const struct intel_wm_funcs i965_wm_funcs = { .update_wm = i965_update_wm, }; -static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = { +static const struct intel_wm_funcs i9xx_wm_funcs = { .update_wm = i9xx_update_wm, }; -static const struct drm_i915_wm_disp_funcs i845_wm_funcs = { +static const struct intel_wm_funcs i845_wm_funcs = { .update_wm = i845_update_wm, }; -static const struct drm_i915_wm_disp_funcs nop_funcs = { +static const struct intel_wm_funcs nop_funcs = { }; /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_i915_private *dev_priv) { + if (DISPLAY_VER(dev_priv) >= 9) { + skl_wm_init(dev_priv); + return; + } + /* For cxsr */ if (IS_PINEVIEW(dev_priv)) pnv_get_mem_freq(dev_priv); else if (GRAPHICS_VER(dev_priv) == 5) ilk_get_mem_freq(dev_priv); - intel_sagv_init(dev_priv); - /* For FIFO watermark updates */ - if (DISPLAY_VER(dev_priv) >= 9) { - skl_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &skl_wm_funcs; - } else if (HAS_PCH_SPLIT(dev_priv)) { + if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); - if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->wm.pri_latency[1] && - dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || - (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] && - dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { - dev_priv->wm_disp = &ilk_wm_funcs; + if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] && + dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) || + (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] && + dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) { + dev_priv->display.funcs.wm = &ilk_wm_funcs; } else { drm_dbg_kms(&dev_priv->drm, "Failed to read display plane latency. 
" "Disable CxSR\n"); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &vlv_wm_funcs; + dev_priv->display.funcs.wm = &vlv_wm_funcs; } else if (IS_G4X(dev_priv)) { g4x_setup_wm_latency(dev_priv); - dev_priv->wm_disp = &g4x_wm_funcs; + dev_priv->display.funcs.wm = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, @@ -8261,22 +5026,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ intel_set_memory_cxsr(dev_priv, false); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } else - dev_priv->wm_disp = &pnv_wm_funcs; + dev_priv->display.funcs.wm = &pnv_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 4) { - dev_priv->wm_disp = &i965_wm_funcs; + dev_priv->display.funcs.wm = &i965_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 3) { - dev_priv->wm_disp = &i9xx_wm_funcs; + dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 2) { if (INTEL_NUM_PIPES(dev_priv) == 1) - dev_priv->wm_disp = &i845_wm_funcs; + dev_priv->display.funcs.wm = &i845_wm_funcs; else - dev_priv->wm_disp = &i9xx_wm_funcs; + dev_priv->display.funcs.wm = &i9xx_wm_funcs; } else { drm_err(&dev_priv->drm, "unexpected fall-through in %s\n", __func__); - dev_priv->wm_disp = &nop_funcs; + dev_priv->display.funcs.wm = &nop_funcs; } } @@ -8285,183 +5050,3 @@ void intel_pm_setup(struct drm_i915_private *dev_priv) dev_priv->runtime_pm.suspended = false; atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); } - -static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj) -{ - struct intel_dbuf_state *dbuf_state; - - dbuf_state = kmemdup(obj->state, sizeof(*dbuf_state), GFP_KERNEL); - if (!dbuf_state) - return NULL; - - return &dbuf_state->base; -} - -static void intel_dbuf_destroy_state(struct intel_global_obj *obj, - struct intel_global_state *state) -{ - kfree(state); -} - -static const struct intel_global_state_funcs intel_dbuf_funcs = { - .atomic_duplicate_state = intel_dbuf_duplicate_state, - .atomic_destroy_state = intel_dbuf_destroy_state, -}; - -struct intel_dbuf_state * -intel_atomic_get_dbuf_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_global_state *dbuf_state; - - dbuf_state = intel_atomic_get_global_obj_state(state, &dev_priv->dbuf.obj); - if (IS_ERR(dbuf_state)) - return ERR_CAST(dbuf_state); - - return to_intel_dbuf_state(dbuf_state); -} - -int intel_dbuf_init(struct drm_i915_private *dev_priv) -{ - struct intel_dbuf_state *dbuf_state; - - dbuf_state = kzalloc(sizeof(*dbuf_state), GFP_KERNEL); - if (!dbuf_state) - return -ENOMEM; - - intel_atomic_global_obj_init(dev_priv, &dev_priv->dbuf.obj, - &dbuf_state->base, &intel_dbuf_funcs); - - return 0; -} - -/* - * Configure MBUS_CTL and all DBUF_CTL_S of each slice to join_mbus state before - * update the request state of all DBUS slices. 
- */ -static void update_mbus_pre_enable(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - u32 mbus_ctl, dbuf_min_tracker_val; - enum dbuf_slice slice; - const struct intel_dbuf_state *dbuf_state = - intel_atomic_get_new_dbuf_state(state); - - if (!HAS_MBUS_JOINING(dev_priv)) - return; - - /* - * TODO: Implement vblank synchronized MBUS joining changes. - * Must be properly coordinated with dbuf reprogramming. - */ - if (dbuf_state->joined_mbus) { - mbus_ctl = MBUS_HASHING_MODE_1x4 | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(3); - } else { - mbus_ctl = MBUS_HASHING_MODE_2x2 | - MBUS_JOIN_PIPE_SELECT_NONE; - dbuf_min_tracker_val = DBUF_MIN_TRACKER_STATE_SERVICE(1); - } - - intel_de_rmw(dev_priv, MBUS_CTL, - MBUS_HASHING_MODE_MASK | MBUS_JOIN | - MBUS_JOIN_PIPE_SELECT_MASK, mbus_ctl); - - for_each_dbuf_slice(dev_priv, slice) - intel_de_rmw(dev_priv, DBUF_CTL_S(slice), - DBUF_MIN_TRACKER_STATE_SERVICE_MASK, - dbuf_min_tracker_val); -} - -void intel_dbuf_pre_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) - && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - update_mbus_pre_enable(state); - gen9_dbuf_slices_update(dev_priv, - old_dbuf_state->enabled_slices | - new_dbuf_state->enabled_slices); -} - -void intel_dbuf_post_plane_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state = - intel_atomic_get_new_dbuf_state(state); - const struct intel_dbuf_state *old_dbuf_state = - intel_atomic_get_old_dbuf_state(state); - - if (!new_dbuf_state || - ((new_dbuf_state->enabled_slices == old_dbuf_state->enabled_slices) - && (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus))) - return; - - WARN_ON(!new_dbuf_state->base.changed); - - gen9_dbuf_slices_update(dev_priv, - new_dbuf_state->enabled_slices); -} - -void intel_mbus_dbox_update(struct intel_atomic_state *state) -{ - struct drm_i915_private *i915 = to_i915(state->base.dev); - const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; - const struct intel_crtc_state *new_crtc_state; - const struct intel_crtc *crtc; - u32 val = 0; - int i; - - if (DISPLAY_VER(i915) < 11) - return; - - new_dbuf_state = intel_atomic_get_new_dbuf_state(state); - old_dbuf_state = intel_atomic_get_old_dbuf_state(state); - if (!new_dbuf_state || - (new_dbuf_state->joined_mbus == old_dbuf_state->joined_mbus && - new_dbuf_state->active_pipes == old_dbuf_state->active_pipes)) - return; - - if (DISPLAY_VER(i915) >= 12) { - val |= MBUS_DBOX_B2B_TRANSACTIONS_MAX(16); - val |= MBUS_DBOX_B2B_TRANSACTIONS_DELAY(1); - val |= MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN; - } - - /* Wa_22010947358:adl-p */ - if (IS_ALDERLAKE_P(i915)) - val |= new_dbuf_state->joined_mbus ? 
MBUS_DBOX_A_CREDIT(6) : - MBUS_DBOX_A_CREDIT(4); - else - val |= MBUS_DBOX_A_CREDIT(2); - - if (IS_ALDERLAKE_P(i915)) { - val |= MBUS_DBOX_BW_CREDIT(2); - val |= MBUS_DBOX_B_CREDIT(8); - } else if (DISPLAY_VER(i915) >= 12) { - val |= MBUS_DBOX_BW_CREDIT(2); - val |= MBUS_DBOX_B_CREDIT(12); - } else { - val |= MBUS_DBOX_BW_CREDIT(1); - val |= MBUS_DBOX_B_CREDIT(8); - } - - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!new_crtc_state->hw.active || - !intel_crtc_needs_modeset(new_crtc_state)) - continue; - - intel_de_write(i915, PIPE_MBUS_DBOX_CTL(crtc->pipe), val); - } -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 945503ae493e..c09b872d65c8 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -8,22 +8,9 @@ #include <linux/types.h> -#include "display/intel_display.h" -#include "display/intel_global_state.h" - -#include "i915_drv.h" - -struct drm_device; struct drm_i915_private; -struct i915_request; -struct intel_atomic_state; -struct intel_bw_state; -struct intel_crtc; struct intel_crtc_state; -struct intel_plane; -struct skl_ddb_entry; -struct skl_pipe_wm; -struct skl_wm_level; +struct intel_plane_state; void intel_init_clock_gating(struct drm_i915_private *dev_priv); void intel_suspend_hw(struct drm_i915_private *dev_priv); @@ -34,56 +21,14 @@ void intel_pm_setup(struct drm_i915_private *dev_priv); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); -void skl_wm_get_hw_state(struct drm_i915_private *dev_priv); -void intel_wm_state_verify(struct intel_crtc *crtc, - struct intel_crtc_state *new_crtc_state); -u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv); -void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv); -u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv, - const struct skl_ddb_entry *entry); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -void skl_wm_sanitize(struct drm_i915_private *dev_priv); -bool intel_can_enable_sagv(struct drm_i915_private *dev_priv, - const struct intel_bw_state *bw_state); -void intel_sagv_pre_plane_update(struct intel_atomic_state *state); -void intel_sagv_post_plane_update(struct intel_atomic_state *state); -bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, - const struct skl_ddb_entry *entries, - int num_entries, int ignore_idx); -void skl_write_plane_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); -void skl_write_cursor_wm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); -void intel_init_ipc(struct drm_i915_private *dev_priv); -void intel_enable_ipc(struct drm_i915_private *dev_priv); +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]); bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); -struct intel_dbuf_state { - struct intel_global_state base; - - struct skl_ddb_entry ddb[I915_MAX_PIPES]; - unsigned int weight[I915_MAX_PIPES]; - u8 slices[I915_MAX_PIPES]; - u8 enabled_slices; - u8 active_pipes; - bool joined_mbus; -}; - -struct intel_dbuf_state * -intel_atomic_get_dbuf_state(struct intel_atomic_state 
*state); - -#define to_intel_dbuf_state(x) container_of((x), struct intel_dbuf_state, base) -#define intel_atomic_get_old_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj)) -#define intel_atomic_get_new_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->dbuf.obj)) - -int intel_dbuf_init(struct drm_i915_private *dev_priv); -void intel_dbuf_pre_plane_update(struct intel_atomic_state *state); -void intel_dbuf_post_plane_update(struct intel_atomic_state *state); -void intel_mbus_dbox_update(struct intel_atomic_state *state); - #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c7ef5f2ff2e1..5cd423c7b646 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -21,6 +21,7 @@ * IN THE SOFTWARE. */ +#include <drm/drm_managed.h> #include <linux/pm_runtime.h> #include "gt/intel_engine_regs.h" @@ -44,29 +45,47 @@ fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) } void -intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug) +intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915) { - spin_lock_init(&mmio_debug->lock); - mmio_debug->unclaimed_mmio_check = 1; + spin_lock_init(&i915->mmio_debug.lock); + i915->mmio_debug.unclaimed_mmio_check = 1; + + i915->uncore.debug = &i915->mmio_debug; } -static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug) +static void mmio_debug_suspend(struct intel_uncore *uncore) { - lockdep_assert_held(&mmio_debug->lock); + if (!uncore->debug) + return; + + spin_lock(&uncore->debug->lock); /* Save and disable mmio debugging for the user bypass */ - if (!mmio_debug->suspend_count++) { - mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check; - mmio_debug->unclaimed_mmio_check = 0; + if (!uncore->debug->suspend_count++) { + uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check; + uncore->debug->unclaimed_mmio_check = 0; } + + spin_unlock(&uncore->debug->lock); } -static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug) +static bool check_for_unclaimed_mmio(struct intel_uncore *uncore); + +static void mmio_debug_resume(struct intel_uncore *uncore) { - lockdep_assert_held(&mmio_debug->lock); + if (!uncore->debug) + return; + + spin_lock(&uncore->debug->lock); + + if (!--uncore->debug->suspend_count) + uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check; - if (!--mmio_debug->suspend_count) - mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check; + if (check_for_unclaimed_mmio(uncore)) + drm_info(&uncore->i915->drm, + "Invalid mmio detected during user access\n"); + + spin_unlock(&uncore->debug->lock); } static const char * const forcewake_domain_names[] = { @@ -677,9 +696,7 @@ void intel_uncore_forcewake_user_get(struct intel_uncore *uncore) spin_lock_irq(&uncore->lock); if (!uncore->user_forcewake_count++) { intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); - spin_lock(&uncore->debug->lock); - mmio_debug_suspend(uncore->debug); - spin_unlock(&uncore->debug->lock); + mmio_debug_suspend(uncore); } spin_unlock_irq(&uncore->lock); } @@ -695,14 +712,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) { spin_lock_irq(&uncore->lock); if (!--uncore->user_forcewake_count) { - spin_lock(&uncore->debug->lock); - mmio_debug_resume(uncore->debug); - - if 
(check_for_unclaimed_mmio(uncore)) - drm_info(&uncore->i915->drm, - "Invalid mmio detected during user access\n"); - spin_unlock(&uncore->debug->lock); - + mmio_debug_resume(uncore); intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); } spin_unlock_irq(&uncore->lock); @@ -918,6 +928,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset) { const struct intel_forcewake_range *entry; + if (IS_GSI_REG(offset)) + offset += uncore->gsi_offset; + entry = BSEARCH(offset, uncore->fw_domains_table, uncore->fw_domains_table_entries, @@ -1133,6 +1146,9 @@ static bool is_shadowed(struct intel_uncore *uncore, u32 offset) if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table)) return false; + if (IS_GSI_REG(offset)) + offset += uncore->gsi_offset; + return BSEARCH(offset, uncore->shadowed_reg_table, uncore->shadowed_reg_table_entries, @@ -1704,7 +1720,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore, const bool read, const bool before) { - if (likely(!uncore->i915->params.mmio_debug)) + if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug) return; /* interrupts are disabled and re-enabled around uncore->lock usage */ @@ -1985,8 +2001,8 @@ static int __fw_domain_init(struct intel_uncore *uncore, d->uncore = uncore; d->wake_count = 0; - d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); - d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset; + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset; d->id = domain_id; @@ -2070,7 +2086,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (GRAPHICS_VER(i915) >= 11) { /* we'll prune the domains of missing engines later */ - intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; + intel_engine_mask_t emask = RUNTIME_INFO(i915)->platform_engine_mask; int i; uncore->fw_get_funcs = &uncore_get_fallback; @@ -2223,6 +2239,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, return NOTIFY_OK; } +static void uncore_unmap_mmio(struct drm_device *drm, void *regs) +{ + iounmap(regs); +} + int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) { struct drm_i915_private *i915 = uncore->i915; @@ -2251,12 +2272,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) return -EIO; } - return 0; -} - -void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) -{ - iounmap(uncore->regs); + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); } void intel_uncore_init_early(struct intel_uncore *uncore, @@ -2266,7 +2282,6 @@ void intel_uncore_init_early(struct intel_uncore *uncore, uncore->i915 = gt->i915; uncore->gt = gt; uncore->rpm = >->i915->runtime_pm; - uncore->debug = >->i915->mmio_debug; } static void uncore_raw_init(struct intel_uncore *uncore) @@ -2446,8 +2461,11 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, } } -void intel_uncore_fini_mmio(struct intel_uncore *uncore) +/* Called via drm-managed action */ +void intel_uncore_fini_mmio(struct drm_device *dev, void *data) { + struct intel_uncore *uncore = data; + if (intel_uncore_has_forcewake(uncore)) { iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( @@ -2577,6 +2595,9 @@ bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) { bool ret; + if (!uncore->debug) + return false; + spin_lock_irq(&uncore->debug->lock); ret = check_for_unclaimed_mmio(uncore); 
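As an aside on the drm-managed conversion in intel_uncore_setup_mmio() above: drmm_add_action_or_reset() registers a release callback that the DRM core runs when the drm_device is torn down, and it invokes the callback immediately if registration itself fails, so the caller needs no manual unwind path. A minimal sketch of the pattern follows; the demo_* names are invented for illustration, while drmm_add_action_or_reset(), ioremap()/iounmap() and the headers are real kernel APIs:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct demo_regs {
        void __iomem *base;     /* hypothetical MMIO mapping */
};

/* Release action: runs at drm_device teardown (or on registration failure). */
static void demo_unmap_mmio(struct drm_device *drm, void *data)
{
        struct demo_regs *regs = data;

        iounmap(regs->base);
}

static int demo_setup_mmio(struct drm_device *drm, struct demo_regs *regs,
                           phys_addr_t phys)
{
        regs->base = ioremap(phys, SZ_4K);
        if (!regs->base)
                return -EIO;

        /* No error unwinding needed: on failure the action fires right away. */
        return drmm_add_action_or_reset(drm, demo_unmap_mmio, regs);
}

This is the same design choice the patch applies to uncore->regs: cleanup ordering is delegated to the drm_device lifetime instead of an explicit intel_uncore_cleanup_mmio() call.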
spin_unlock_irq(&uncore->debug->lock); @@ -2589,6 +2610,9 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) { bool ret = false; + if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug)) + return false; + spin_lock_irq(&uncore->debug->lock); if (unlikely(uncore->debug->unclaimed_mmio_check <= 0)) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index b1fa912a65e7..5022bac80b67 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -33,6 +33,7 @@ #include "i915_reg_defs.h" +struct drm_device; struct drm_i915_private; struct intel_runtime_pm; struct intel_uncore; @@ -135,6 +136,16 @@ struct intel_uncore { spinlock_t lock; /** lock is also taken in irq contexts. */ + /* + * Do we need to apply an additional offset to reach the beginning + * of the basic non-engine GT registers (referred to as "GSI" on + * newer platforms, or "GT block" on older platforms)? If so, we'll + * track that here and apply it transparently to registers in the + * appropriate range to maintain compatibility with our existing + * register definitions and GT code. + */ + u32 gsi_offset; + unsigned int flags; #define UNCORE_HAS_FORCEWAKE BIT(0) #define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) @@ -210,8 +221,7 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore) return uncore->flags & UNCORE_HAS_FIFO; } -void -intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); +void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915); void intel_uncore_init_early(struct intel_uncore *uncore, struct intel_gt *gt); int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr); @@ -221,7 +231,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); -void intel_uncore_fini_mmio(struct intel_uncore *uncore); +void intel_uncore_fini_mmio(struct drm_device *dev, void *data); void intel_uncore_suspend(struct intel_uncore *uncore); void intel_uncore_resume_early(struct intel_uncore *uncore); void intel_uncore_runtime_resume(struct intel_uncore *uncore); @@ -294,19 +304,27 @@ intel_wait_for_register_fw(struct intel_uncore *uncore, 2, timeout_ms, NULL); } +#define IS_GSI_REG(reg) ((reg) < 0x40000) + /* register access functions */ #define __raw_read(x__, s__) \ static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ i915_reg_t reg) \ { \ - return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \ + u32 offset = i915_mmio_reg_offset(reg); \ + if (IS_GSI_REG(offset)) \ + offset += uncore->gsi_offset; \ + return read##s__(uncore->regs + offset); \ } #define __raw_write(x__, s__) \ static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ i915_reg_t reg, u##x__ val) \ { \ - write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \ + u32 offset = i915_mmio_reg_offset(reg); \ + if (IS_GSI_REG(offset)) \ + offset += uncore->gsi_offset; \ + write##s__(val, uncore->regs + offset); \ } __raw_read(8, b) __raw_read(16, w) @@ -447,6 +465,18 @@ static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, return (reg_val & mask) != expected_val ? 
-EINVAL : 0; } +/* + * The raw_reg_{read,write} macros are intended as a micro-optimization for + * interrupt handlers so that the pointer indirection on uncore->regs can + * be computed once (and presumably cached in a register) instead of generating + * extra load instructions for each MMIO access. + * + * Given that these macros are only intended for non-GSI interrupt registers + * (and the goal is to avoid extra instructions generated by the compiler), + * these macros do not account for uncore->gsi_offset. Any caller that needs + * to use these macros on a GSI register is responsible for adding the + * appropriate GSI offset to the 'base' parameter. + */ #define raw_reg_read(base, reg) \ readl(base + i915_mmio_reg_offset(reg)) #define raw_reg_write(base, reg, value) \ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index 17109c513259..69cdaaddc4a9 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -169,11 +169,11 @@ static void pxp_queue_termination(struct intel_pxp *pxp) * We want to get the same effect as if we received a termination * interrupt, so just pretend that we did. */ - spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); intel_pxp_mark_termination_in_progress(pxp); pxp->session_events |= PXP_TERMINATION_REQUEST; queue_work(system_unbound_wq, &pxp->session_work); - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); } static bool pxp_component_bound(struct intel_pxp *pxp) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c index e888b5124a07..4359e8be4101 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c @@ -47,9 +47,9 @@ static int pxp_terminate_set(void *data, u64 val) return -ENODEV; /* simulate a termination interrupt */ - spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT); - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(100))) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c index 04745f914407..c28be430718a 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c @@ -25,7 +25,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp))) return; - lockdep_assert_held(>->irq_lock); + lockdep_assert_held(gt->irq_lock); if (unlikely(!iir)) return; @@ -55,16 +55,16 @@ static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts) static inline void pxp_irq_reset(struct intel_gt *gt) { - spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); gen11_gt_reset_one_iir(gt, 0, GEN11_KCR); - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_enable(struct intel_pxp *pxp) { struct intel_gt *gt = pxp_to_gt(pxp); - spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); if (!pxp->irq_enabled) WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR)); @@ -72,7 +72,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp) __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS); pxp->irq_enabled = true; - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); } void intel_pxp_irq_disable(struct intel_pxp *pxp) @@ -88,12 +88,12 @@ void intel_pxp_irq_disable(struct intel_pxp *pxp) */ GEM_WARN_ON(intel_pxp_is_active(pxp)); - 
spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); pxp->irq_enabled = false; __pxp_set_interrupts(gt, 0); - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); intel_synchronize_irq(gt->i915); pxp_irq_reset(gt); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index 92b00b4de240..1bb5b5249157 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -144,9 +144,9 @@ void intel_pxp_session_work(struct work_struct *work) intel_wakeref_t wakeref; u32 events = 0; - spin_lock_irq(>->irq_lock); + spin_lock_irq(gt->irq_lock); events = fetch_and_zero(&pxp->session_events); - spin_unlock_irq(>->irq_lock); + spin_unlock_irq(gt->irq_lock); if (!events) return; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9c31a16f8380..fff11c90f1fa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -115,6 +115,7 @@ static struct dev_pm_domain pm_domain = { static void mock_gt_probe(struct drm_i915_private *i915) { i915->gt[0] = &i915->gt0; + i915->gt[0]->name = "Mock GT"; } struct drm_i915_private *mock_gem_device(void) @@ -172,14 +173,14 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - mkwrite_device_info(i915)->graphics.ver = -1; + RUNTIME_INFO(i915)->graphics.ip.ver = -1; - mkwrite_device_info(i915)->page_sizes = + RUNTIME_INFO(i915)->page_sizes = I915_GTT_PAGE_SIZE_4K | I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - mkwrite_device_info(i915)->memory_regions = REGION_SMEM; + RUNTIME_INFO(i915)->memory_regions = REGION_SMEM; intel_memory_regions_hw_probe(i915); spin_lock_init(&i915->gpu_error.lock); @@ -209,7 +210,7 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(to_gt(i915)); to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm); - mkwrite_device_info(i915)->platform_engine_mask = BIT(0); + RUNTIME_INFO(i915)->platform_engine_mask = BIT(0); to_gt(i915)->info.engine_mask = BIT(0); to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index dfa942ca62da..9d085c05c49c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -1222,8 +1222,8 @@ static void mtk_dp_video_mute(struct mtk_dp *mtk_dp, bool enable) mtk_dp->data->smc_cmd, enable, 0, 0, 0, 0, 0, &res); - dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: 0x%x, ret: 0x%lx-0x%lx\n", - mtk_dp->data->smc_cmd, enable, res.a0, res.a1); + dev_dbg(mtk_dp->dev, "smc cmd: 0x%x, p1: %s, ret: 0x%lx-0x%lx\n", + mtk_dp->data->smc_cmd, enable ? "enable" : "disable", res.a0, res.a1); } static void mtk_dp_audio_mute(struct mtk_dp *mtk_dp, bool mute) @@ -1933,39 +1933,41 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) bool enabled = mtk_dp->enabled; u8 sink_count = 0; - if (mtk_dp->train_info.cable_plugged_in) { - if (!enabled) { - /* power on aux */ - mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, - DP_PWR_STATE_BANDGAP_TPLL_LANE, - DP_PWR_STATE_MASK); - - /* power on panel */ - drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); - usleep_range(2000, 5000); - } - /* - * Some dongles still source HPD when they do not connect to any - * sink device. 
To avoid this, we need to read the sink count - * to make sure we do connect to sink devices. After this detect - * function, we just need to check the HPD connection to check - * whether we connect to a sink device. - */ - drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count); - if (DP_GET_SINK_COUNT(sink_count)) - ret = connector_status_connected; - - if (!enabled) { - /* power off panel */ - drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); - usleep_range(2000, 3000); - - /* power off aux */ - mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, - DP_PWR_STATE_BANDGAP_TPLL, - DP_PWR_STATE_MASK); - } + if (!mtk_dp->train_info.cable_plugged_in) + return ret; + + if (!enabled) { + /* power on aux */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL_LANE, + DP_PWR_STATE_MASK); + + /* power on panel */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + usleep_range(2000, 5000); } + /* + * Some dongles still source HPD when they do not connect to any + * sink device. To avoid this, we need to read the sink count + * to make sure we do connect to sink devices. After this detect + * function, we just need to check the HPD connection to check + * whether we connect to a sink device. + */ + drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count); + if (DP_GET_SINK_COUNT(sink_count)) + ret = connector_status_connected; + + if (!enabled) { + /* power off panel */ + drm_dp_dpcd_writeb(&mtk_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + usleep_range(2000, 3000); + + /* power off aux */ + mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE, + DP_PWR_STATE_BANDGAP_TPLL, + DP_PWR_STATE_MASK); + } + return ret; } @@ -2642,7 +2644,7 @@ static const struct of_device_id mtk_dp_of_match[] = { }; MODULE_DEVICE_TABLE(of, mtk_dp_of_match); -struct platform_driver mtk_dp_driver = { +static struct platform_driver mtk_dp_driver = { .probe = mtk_dp_probe, .remove = mtk_dp_remove, .driver = { diff --git a/drivers/gpu/drm/mediatek/mtk_dp_reg.h b/drivers/gpu/drm/mediatek/mtk_dp_reg.h index 096ad6572a5e..84e38cef03c2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp_reg.h +++ b/drivers/gpu/drm/mediatek/mtk_dp_reg.h @@ -153,8 +153,6 @@ #define CH_STATUS_1_DP_ENC0_P0_MASK GENMASK(15, 0) #define MTK_DP_ENC0_P0_3094 0x3094 #define CH_STATUS_2_DP_ENC0_P0_MASK GENMASK(7, 0) -#define MTK_DP_ENC0_P0_30A0 0x30a0 -#define DP_ENC0_30A0_MASK (BIT(7) | BIT(8) | BIT(12)) #define MTK_DP_ENC0_P0_30A4 0x30a4 #define AU_TS_CFG_DP_ENC0_P0_MASK GENMASK(7, 0) #define MTK_DP_ENC0_P0_30A8 0x30a8 @@ -171,8 +169,6 @@ #define MTK_DP_ENC0_P0_312C 0x312c #define ASP_HB2_DP_ENC0_P0_MASK GENMASK(7, 0) #define ASP_HB3_DP_ENC0_P0_MASK GENMASK(15, 8) -#define MTK_DP_ENC0_P0_3130 0x3130 -#define MTK_DP_ENC0_P0_3138 0x3138 #define MTK_DP_ENC0_P0_3154 0x3154 #define PGEN_HTOTAL_DP_ENC0_P0_MASK GENMASK(13, 0) #define MTK_DP_ENC0_P0_3158 0x3158 @@ -206,8 +202,6 @@ #define SDP_PACKET_TYPE_DP_ENC1_P0_MASK GENMASK(4, 0) #define SDP_PACKET_W_DP_ENC1_P0 BIT(5) #define SDP_PACKET_W_DP_ENC1_P0_MASK BIT(5) -#define MTK_DP_ENC1_P0_328C 0x328c -#define VSC_DATA_RDY_VESA_DP_ENC1_P0_MASK BIT(7) #define MTK_DP_ENC1_P0_3300 0x3300 #define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_VAL 2 #define VIDEO_AFIFO_RDY_SEL_DP_ENC1_P0_MASK GENMASK(9, 8) diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 630a4e301ef6..508a6d994e83 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -11,7 +11,6 @@ #include <linux/media-bus-format.h> #include <linux/of.h> 
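The mtk_dp_bdg_detect() rework above keys connection status off DP_SINK_COUNT rather than HPD alone, because some dongles keep sourcing HPD with nothing attached downstream. A minimal sketch of that style of check over an already powered-up AUX channel, using the stock DRM DP helpers (the wrapper function itself is hypothetical, not part of this patch):

#include <drm/drm_connector.h>
#include <drm/display/drm_dp_helper.h>

/* Hypothetical helper: report "connected" only when the DPCD sink count
 * is non-zero, mirroring the check mtk_dp_bdg_detect() performs above.
 */
static enum drm_connector_status probe_sink_count(struct drm_dp_aux *aux)
{
	u8 sink_count = 0;

	/* DP_SINK_COUNT is DPCD register 0x200 */
	if (drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &sink_count) < 0)
		return connector_status_disconnected;

	/* DP_GET_SINK_COUNT() masks off the CP_READY bit */
	return DP_GET_SINK_COUNT(sink_count) ?
	       connector_status_connected : connector_status_disconnected;
}
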
#include <linux/of_device.h> -#include <linux/of_gpio.h> #include <linux/of_graph.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 5f02f8d0e4fc..91f58db5915f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -833,11 +833,8 @@ static int mtk_drm_sys_prepare(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm; - int ret; - - ret = drm_mode_config_helper_suspend(drm); - return ret; + return drm_mode_config_helper_suspend(drm); } static void mtk_drm_sys_complete(struct device *dev) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 3196189429bc..4c80b6896dc3 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -16,7 +16,6 @@ #include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/of_graph.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index fcf0d493782c..3b24a924b7b9 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -388,10 +388,14 @@ static void meson_drv_unbind(struct device *dev) drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); - component_unbind_all(dev, drm); free_irq(priv->vsync_irq, drm); drm_dev_put(drm); + meson_encoder_hdmi_remove(priv); + meson_encoder_cvbs_remove(priv); + + component_unbind_all(dev, drm); + if (priv->afbcd.ops) priv->afbcd.ops->exit(priv); } @@ -493,6 +497,13 @@ static int meson_drv_probe(struct platform_device *pdev) return 0; }; +static int meson_drv_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &meson_drv_master_ops); + + return 0; +} + static struct meson_drm_match_data meson_drm_gxbb_data = { .compat = VPU_COMPATIBLE_GXBB, }; @@ -530,6 +541,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = { static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, + .remove = meson_drv_remove, .shutdown = meson_drv_shutdown, .driver = { .name = "meson-drm", diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 177dac3ca3be..c62ee358456f 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h @@ -25,6 +25,12 @@ enum vpu_compatible { VPU_COMPATIBLE_G12A = 3, }; +enum { + MESON_ENC_CVBS = 0, + MESON_ENC_HDMI, + MESON_ENC_LAST, +}; + struct meson_drm_match_data { enum vpu_compatible compat; struct meson_afbcd_ops *afbcd_ops; @@ -51,6 +57,7 @@ struct meson_drm { struct drm_crtc *crtc; struct drm_plane *primary_plane; struct drm_plane *overlay_plane; + void *encoders[MESON_ENC_LAST]; const struct meson_drm_soc_limits *limits; diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index 8110a6e39320..5675bc2a92cf 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -281,5 +281,18 @@ int meson_encoder_cvbs_init(struct meson_drm *priv) } drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder); + priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs; + return 0; } + +void meson_encoder_cvbs_remove(struct meson_drm *priv) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs; + + if 
(priv->encoders[MESON_ENC_CVBS]) { + meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS]; + drm_bridge_remove(&meson_encoder_cvbs->bridge); + drm_bridge_remove(meson_encoder_cvbs->next_bridge); + } +} diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h index 61d9d183ce7f..09710fec3c66 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.h +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h @@ -25,5 +25,6 @@ struct meson_cvbs_mode { extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT]; int meson_encoder_cvbs_init(struct meson_drm *priv); +void meson_encoder_cvbs_remove(struct meson_drm *priv); #endif /* __MESON_VENC_CVBS_H */ diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 2f616c55c271..53231bfdf7e2 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -452,6 +452,8 @@ int meson_encoder_hdmi_init(struct meson_drm *priv) meson_encoder_hdmi->cec_notifier = notifier; } + priv->encoders[MESON_ENC_HDMI] = meson_encoder_hdmi; + dev_dbg(priv->dev, "HDMI encoder initialized\n"); return 0; @@ -460,3 +462,14 @@ err_put_node: of_node_put(remote); return ret; } + +void meson_encoder_hdmi_remove(struct meson_drm *priv) +{ + struct meson_encoder_hdmi *meson_encoder_hdmi; + + if (priv->encoders[MESON_ENC_HDMI]) { + meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI]; + drm_bridge_remove(&meson_encoder_hdmi->bridge); + drm_bridge_remove(meson_encoder_hdmi->next_bridge); + } +} diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h index ed19494f0956..a6cd38eb5f71 100644 --- a/drivers/gpu/drm/meson/meson_encoder_hdmi.h +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -8,5 +8,6 @@ #define __MESON_ENCODER_HDMI_H int meson_encoder_hdmi_init(struct meson_drm *priv); +void meson_encoder_hdmi_remove(struct meson_drm *priv); #endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 0ab0e1dd8bbb..2c8b9899625b 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -68,7 +68,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ); OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 0c6b2a6d0b4c..7cb8d9849c07 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -62,7 +62,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); + OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ); OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno); diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h index b03e2c413ab1..beea4a7fc1df 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h +++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h @@ -1413,6 +1413,10 @@ static inline uint32_t REG_A6XX_RBBM_PERFCTR_RBBM_SEL(uint32_t i0) { return 0x00 #define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011 +#define 
REG_A6XX_RBBM_GBIF_HALT 0x00000016 + +#define REG_A6XX_RBBM_GBIF_HALT_ACK 0x00000017 + #define REG_A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD 0x0000001c #define A6XX_RBBM_WAIT_FOR_GPU_IDLE_CMD_WAIT_GPU_IDLE 0x00000001 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 310a317885a1..e033d6a67a20 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -873,9 +873,47 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu) (val & 1), 100, 1000); } +#define GBIF_CLIENT_HALT_MASK BIT(0) +#define GBIF_ARB_HALT_MASK BIT(1) + +static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) +{ + struct msm_gpu *gpu = &adreno_gpu->base; + + if (!a6xx_has_gbif(adreno_gpu)) { + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); + spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & + 0xf) == 0xf); + gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); + + return; + } + + /* Halt the gx side of GBIF */ + gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1); + spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1); + + /* Halt new client requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); + + /* Halt all AXI requests on GBIF */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); + spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & + (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); + + /* The GBIF halt needs to be explicitly cleared */ + gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); +} + /* Force the GMU off in case it isn't responsive */ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + /* Flush all the queues */ a6xx_hfi_stop(gmu); @@ -887,6 +925,15 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu) /* Make sure there are no outstanding RPMh votes */ a6xx_gmu_rpmh_off(gmu); + + /* Halt the gmu cm3 core */ + gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); + + a6xx_bus_clear_pending_transactions(adreno_gpu); + + /* Reset GPU core blocks */ + gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1); + udelay(100); } static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) @@ -1014,36 +1061,6 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) return true; } -#define GBIF_CLIENT_HALT_MASK BIT(0) -#define GBIF_ARB_HALT_MASK BIT(1) - -static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu) -{ - struct msm_gpu *gpu = &adreno_gpu->base; - - if (!a6xx_has_gbif(adreno_gpu)) { - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf); - spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & - 0xf) == 0xf); - gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0); - - return; - } - - /* Halt new client requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK); - - /* Halt all AXI requests on GBIF */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK); - spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & - (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK); - - /* The GBIF halt needs to be explicitly cleared */ - gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0); -} - /* Gracefully try to shut down the GMU and by extension the GPU */ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) { @@ 
-1069,7 +1086,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) a6xx_bus_clear_pending_transactions(adreno_gpu); /* tell the GMU we want to slumber */ - a6xx_gmu_notify_slumber(gmu); + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) { + a6xx_gmu_force_off(gmu); + return; + } ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 4d501100b9e4..fdc578016e0b 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -10,6 +10,7 @@ #include <linux/bitfield.h> #include <linux/devfreq.h> +#include <linux/reset.h> #include <linux/soc/qcom/llcc-qcom.h> #define GPU_PAS_ID 13 @@ -146,7 +147,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, */ OUT_PKT7(ring, CP_EVENT_WRITE, 1); - OUT_RING(ring, 0x31); + OUT_RING(ring, CACHE_INVALIDATE); if (!sysprof) { /* @@ -987,6 +988,10 @@ static int hw_init(struct msm_gpu *gpu) /* Make sure the GMU keeps the GPU on while we set it up */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); + /* Clear GBIF halt in case GX domain was not collapsed */ + if (a6xx_has_gbif(adreno_gpu)) + gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0); + gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0); /* @@ -1261,7 +1266,7 @@ static void a6xx_recover(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - int i; + int i, active_submits; adreno_dump_info(gpu); @@ -1272,14 +1277,46 @@ static void a6xx_recover(struct msm_gpu *gpu) if (hang_debug) a6xx_dump(gpu); + /* Halt SQE first */ + gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3); + /* * Turn off keep alive that might have been enabled by the hang * interrupt */ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0); - gpu->funcs->pm_suspend(gpu); - gpu->funcs->pm_resume(gpu); + pm_runtime_dont_use_autosuspend(&gpu->pdev->dev); + + /* active_submit won't change until we make a submission */ + mutex_lock(&gpu->active_lock); + active_submits = gpu->active_submits; + + /* + * Temporarily clear active_submits count to silence a WARN() in the + * runtime suspend cb + */ + gpu->active_submits = 0; + + /* Drop the rpm refcount from active submits */ + if (active_submits) + pm_runtime_put(&gpu->pdev->dev); + + /* And the final one from recover worker */ + pm_runtime_put_sync(&gpu->pdev->dev); + + /* Call into gpucc driver to poll for cx gdsc collapse */ + reset_control_reset(gpu->cx_collapse); + + pm_runtime_use_autosuspend(&gpu->pdev->dev); + + if (active_submits) + pm_runtime_get(&gpu->pdev->dev); + + pm_runtime_get_sync(&gpu->pdev->dev); + + gpu->active_submits = active_submits; + mutex_unlock(&gpu->active_lock); msm_gpu_hw_init(gpu); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 781dcd3fb283..13ce321283ff 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -412,7 +412,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, struct dpu_format *format; struct dpu_hw_ctl *ctl = mixer->lm_ctl; - u32 flush_mask; uint32_t stage_idx, lm_idx; int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 }; bool bg_alpha_enable = false; @@ -420,6 +419,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, memset(fetch_active, 0, sizeof(fetch_active)); drm_atomic_crtc_for_each_plane(plane, crtc) { + enum dpu_sspp sspp_idx; + state = plane->state; if (!state) continue; @@ -430,14 +431,14 @@ static 
void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, pstate = to_dpu_plane_state(state); fb = state->fb; - dpu_plane_get_ctl_flush(plane, ctl, &flush_mask); - set_bit(dpu_plane_pipe(plane), fetch_active); + sspp_idx = dpu_plane_pipe(plane); + set_bit(sspp_idx, fetch_active); DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n", crtc->base.id, pstate->stage, plane->base.id, - dpu_plane_pipe(plane) - SSPP_VIG0, + sspp_idx - SSPP_VIG0, state->fb ? state->fb->base.id : -1); format = to_dpu_format(msm_framebuffer_format(pstate->base.fb)); @@ -447,13 +448,13 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, stage_idx = zpos_cnt[pstate->stage]++; stage_cfg->stage[pstate->stage][stage_idx] = - dpu_plane_pipe(plane); + sspp_idx; stage_cfg->multirect_index[pstate->stage][stage_idx] = pstate->multirect_index; trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane), state, pstate, stage_idx, - dpu_plane_pipe(plane) - SSPP_VIG0, + sspp_idx - SSPP_VIG0, format->base.pixel_format, fb ? fb->modifier : 0); @@ -462,7 +463,8 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc, _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format); - mixer[lm_idx].flush_mask |= flush_mask; + mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, + sspp_idx); if (bg_alpha_enable && !format->alpha_enable) mixer[lm_idx].mixer_op_mode = 0; @@ -496,7 +498,6 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) for (i = 0; i < cstate->num_mixers; i++) { mixer[i].mixer_op_mode = 0; - mixer[i].flush_mask = 0; if (mixer[i].lm_ctl->ops.clear_all_blendstages) mixer[i].lm_ctl->ops.clear_all_blendstages( mixer[i].lm_ctl); @@ -513,17 +514,14 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc) lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode); - mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl, - mixer[i].hw_lm->idx); - /* stage config flush mask */ - ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); + ctl->ops.update_pending_flush_mixer(ctl, + mixer[i].hw_lm->idx); - DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n", + DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n", mixer[i].hw_lm->idx - LM_0, mixer[i].mixer_op_mode, - ctl->idx - CTL_0, - mixer[i].flush_mask); + ctl->idx - CTL_0); ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx, &stage_cfg); @@ -767,16 +765,9 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc) dspp->ops.setup_pcc(dspp, &cfg); } - mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl, - mixer[i].hw_dspp->idx); - /* stage config flush mask */ - ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask); - - DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n", - mixer[i].hw_lm->idx - DSPP_0, - ctl->idx - CTL_0, - mixer[i].flush_mask); + ctl->ops.update_pending_flush_dspp(ctl, + mixer[i].hw_dspp->idx); } } @@ -1235,17 +1226,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc, } for (i = 1; i < SSPP_MAX; i++) { - if (pipe_staged[i]) { + if (pipe_staged[i]) dpu_plane_clear_multirect(pipe_staged[i]); - - if (is_dpu_plane_virtual(pipe_staged[i]->plane)) { - DPU_ERROR( - "r1 only virt plane:%d not supported\n", - pipe_staged[i]->plane->base.id); - rc = -EINVAL; - goto end; - } - } } z_pos = -1; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index 9b67645c2574..539b68b1626a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -97,7 +97,6 @@ struct dpu_crtc_mixer { struct dpu_hw_ctl *lm_ctl; struct dpu_hw_dspp 
*hw_dspp; u32 mixer_op_mode; - u32 flush_mask; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index c682d4e02d1b..9c6817b5a194 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -162,7 +162,7 @@ enum dpu_enc_rc_states { * @vsync_event_work: worker to handle vsync event for autorefresh * @topology: topology of the display * @idle_timeout: idle timeout duration in milliseconds - * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders + * @dsc: drm_dsc_config pointer, for DSC-enabled encoders */ struct dpu_encoder_virt { struct drm_encoder base; @@ -208,7 +208,7 @@ struct dpu_encoder_virt { bool wide_bus_en; /* DSC configuration */ - struct msm_display_dsc_config *dsc; + struct drm_dsc_config *dsc; }; #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) @@ -1791,12 +1791,12 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work) } static u32 -dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc, +dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc, u32 enc_ip_width) { int ssm_delay, total_pixels, soft_slice_per_enc; - soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width; + soft_slice_per_enc = enc_ip_width / dsc->slice_width; /* * minimum number of initial line pixels is a sum of: @@ -1808,16 +1808,16 @@ dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc, * 5. 6 additional pixels as the output of the rate buffer is * 48 bits wide */ - ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92); - total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47; + ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92); + total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; if (soft_slice_per_enc > 1) total_pixels += (ssm_delay * 3); - return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width); + return DIV_ROUND_UP(total_pixels, dsc->slice_width); } static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc, struct dpu_hw_pingpong *hw_pp, - struct msm_display_dsc_config *dsc, + struct drm_dsc_config *dsc, u32 common_mode, u32 initial_lines) { @@ -1835,7 +1835,7 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc, } static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, - struct msm_display_dsc_config *dsc) + struct drm_dsc_config *dsc) { /* coding only for 2LM, 2enc, 1 dsc config */ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master; @@ -1858,14 +1858,15 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc, } } - pic_width = dsc->drm->pic_width; + dsc_common_mode = 0; + pic_width = dsc->pic_width; dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL; if (enc_master->intf_mode == INTF_MODE_VIDEO) dsc_common_mode |= DSC_MODE_VIDEO; - this_frame_slices = pic_width / dsc->drm->slice_width; - intf_ip_w = this_frame_slices * dsc->drm->slice_width; + this_frame_slices = pic_width / dsc->slice_width; + intf_ip_w = this_frame_slices * dsc->slice_width; /* * dsc merge case: when using 2 encoders for the same stream, @@ -1980,7 +1981,6 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) { struct dpu_hw_mixer_cfg mixer; int i, num_lm; - u32 flush_mask = 0; struct dpu_global_state *global_state; struct dpu_hw_blk *hw_lm[2]; struct dpu_hw_mixer *hw_mixer[2]; @@ -1999,9 +1999,8 @@ static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc) for (i = 0; i < num_lm; i++) { 
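	/*
	 * Illustrative aside, not part of this patch: the CTL flush interface
	 * used by this loop is reworked below so that the CTL block
	 * accumulates its own pending_flush_mask instead of handing bitmasks
	 * back to callers.  For a mixer, the caller goes from
	 *
	 *     flush_mask = ctl->ops.get_bitmask_mixer(ctl, lm_idx);
	 *     ctl->ops.update_pending_flush(ctl, flush_mask);
	 *
	 * to the single call
	 *
	 *     ctl->ops.update_pending_flush_mixer(ctl, lm_idx);
	 *
	 * Either way the accumulated mask only reaches hardware once
	 * ctl->ops.trigger_flush() writes it to the CTL_FLUSH register.
	 */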
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); - flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx); - if (phys_enc->hw_ctl->ops.update_pending_flush) - phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask); + if (phys_enc->hw_ctl->ops.update_pending_flush_mixer) + phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx); /* clear all blendstages */ if (phys_enc->hw_ctl->ops.setup_blendstage) @@ -2061,6 +2060,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) intf_cfg.stream_sel = 0; /* Don't care value for video mode */ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc); + + if (phys_enc->hw_intf) + intf_cfg.intf = phys_enc->hw_intf->idx; + if (phys_enc->hw_wb) + intf_cfg.wb = phys_enc->hw_wb->idx; + if (phys_enc->hw_pp->merge_3d) intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index d4d1ecd416e3..9e7236ef34e6 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -36,7 +36,7 @@ struct msm_display_info { uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY]; bool is_cmd_mode; bool is_te_using_watchdog_timer; - struct msm_display_dsc_config *dsc; + struct drm_dsc_config *dsc; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 0239a811d5ec..27f029fdc682 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -1333,7 +1333,7 @@ static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = { static const struct dpu_vbif_cfg msm8998_vbif[] = { { - .name = "vbif_0", .id = VBIF_0, + .name = "vbif_rt", .id = VBIF_RT, .base = 0, .len = 0x1040, .default_ot_rd_limit = 32, .default_ot_wr_limit = 32, @@ -1363,7 +1363,7 @@ static const struct dpu_vbif_cfg msm8998_vbif[] = { static const struct dpu_vbif_cfg sdm845_vbif[] = { { - .name = "vbif_0", .id = VBIF_0, + .name = "vbif_rt", .id = VBIF_RT, .base = 0, .len = 0x1040, .features = BIT(DPU_VBIF_QOS_REMAP), .xin_halt_timeout = 0x4000, @@ -1939,11 +1939,6 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = { const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev) { int i; - struct dpu_mdss_cfg *dpu_cfg; - - dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL); - if (!dpu_cfg) - return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) { if (cfg_handler[i].hw_rev == hw_rev) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index 71fe4c505f5b..38aa38ab1568 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -76,7 +76,7 @@ enum { /** * MDP TOP BLOCK features - * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe + * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression. 
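 * (Illustrative usage, not from this patch: these feature flags are plain
 *  bit indices tested with BIT(), e.g. the CTL setup code below only wires
 *  up set_active_pipes when "cap & BIT(DPU_CTL_FETCH_ACTIVE)" is set.)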
* @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c index e12b7fa48a7b..a35ecb6676c8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c @@ -150,92 +150,84 @@ static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx) DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); } -static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx, +static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx, enum dpu_sspp sspp) { - uint32_t flushbits = 0; - switch (sspp) { case SSPP_VIG0: - flushbits = BIT(0); + ctx->pending_flush_mask |= BIT(0); break; case SSPP_VIG1: - flushbits = BIT(1); + ctx->pending_flush_mask |= BIT(1); break; case SSPP_VIG2: - flushbits = BIT(2); + ctx->pending_flush_mask |= BIT(2); break; case SSPP_VIG3: - flushbits = BIT(18); + ctx->pending_flush_mask |= BIT(18); break; case SSPP_RGB0: - flushbits = BIT(3); + ctx->pending_flush_mask |= BIT(3); break; case SSPP_RGB1: - flushbits = BIT(4); + ctx->pending_flush_mask |= BIT(4); break; case SSPP_RGB2: - flushbits = BIT(5); + ctx->pending_flush_mask |= BIT(5); break; case SSPP_RGB3: - flushbits = BIT(19); + ctx->pending_flush_mask |= BIT(19); break; case SSPP_DMA0: - flushbits = BIT(11); + ctx->pending_flush_mask |= BIT(11); break; case SSPP_DMA1: - flushbits = BIT(12); + ctx->pending_flush_mask |= BIT(12); break; case SSPP_DMA2: - flushbits = BIT(24); + ctx->pending_flush_mask |= BIT(24); break; case SSPP_DMA3: - flushbits = BIT(25); + ctx->pending_flush_mask |= BIT(25); break; case SSPP_CURSOR0: - flushbits = BIT(22); + ctx->pending_flush_mask |= BIT(22); break; case SSPP_CURSOR1: - flushbits = BIT(23); + ctx->pending_flush_mask |= BIT(23); break; default: break; } - - return flushbits; } -static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx, +static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx, enum dpu_lm lm) { - uint32_t flushbits = 0; - switch (lm) { case LM_0: - flushbits = BIT(6); + ctx->pending_flush_mask |= BIT(6); break; case LM_1: - flushbits = BIT(7); + ctx->pending_flush_mask |= BIT(7); break; case LM_2: - flushbits = BIT(8); + ctx->pending_flush_mask |= BIT(8); break; case LM_3: - flushbits = BIT(9); + ctx->pending_flush_mask |= BIT(9); break; case LM_4: - flushbits = BIT(10); + ctx->pending_flush_mask |= BIT(10); break; case LM_5: - flushbits = BIT(20); + ctx->pending_flush_mask |= BIT(20); break; default: - return -EINVAL; + break; } - flushbits |= CTL_FLUSH_MASK_CTL; - - return flushbits; + ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL; } static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx, @@ -294,29 +286,25 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx, ctx->pending_flush_mask |= BIT(MERGE_3D_IDX); } -static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx, +static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx, enum dpu_dspp dspp) { - uint32_t flushbits = 0; - switch (dspp) { case DSPP_0: - flushbits = BIT(13); + ctx->pending_flush_mask |= BIT(13); break; case DSPP_1: - flushbits = BIT(14); + ctx->pending_flush_mask |= BIT(14); break; case DSPP_2: - flushbits = BIT(15); + ctx->pending_flush_mask |= BIT(15); break; case DSPP_3: - flushbits = BIT(21); + ctx->pending_flush_mask |= BIT(21); break; default: - return 0; + break; } - - return flushbits; } static u32 dpu_hw_ctl_poll_reset_status(struct 
dpu_hw_ctl *ctx, u32 timeout_us) @@ -685,9 +673,9 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops, ops->wait_reset_status = dpu_hw_ctl_wait_reset_status; ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages; ops->setup_blendstage = dpu_hw_ctl_setup_blendstage; - ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp; - ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer; - ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp; + ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp; + ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer; + ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp; if (cap & BIT(DPU_CTL_FETCH_ACTIVE)) ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active; }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h index 7d9ad6a3f9f6..96c012ec8467 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h @@ -130,6 +130,32 @@ struct dpu_hw_ctl_ops { enum dpu_merge_3d blk); /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : SSPP block index + */ + void (*update_pending_flush_sspp)(struct dpu_hw_ctl *ctx, + enum dpu_sspp blk); + + /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : LM block index + */ + void (*update_pending_flush_mixer)(struct dpu_hw_ctl *ctx, + enum dpu_lm blk); + + /** + * OR in the given flushbits to the cached pending_flush_mask + * No effect on hardware + * @ctx : ctl path ctx pointer + * @blk : DSPP block index + */ + void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx, + enum dpu_dspp blk); + /** * Write the value of the pending_flush_mask to hardware * @ctx : ctl path ctx pointer */ @@ -171,15 +197,6 @@ struct dpu_hw_ctl_ops { */ int (*wait_reset_status)(struct dpu_hw_ctl *ctx); - uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx, - enum dpu_sspp blk); - - uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx, - enum dpu_lm blk); - - uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx, - enum dpu_dspp blk); - /** * Set all blend stages to disabled * @ctx : ctl path ctx pointer diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 411689ae6382..f2ddcfb6f7ee 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -37,7 +37,7 @@ static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc) } static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, - struct msm_display_dsc_config *dsc, + struct drm_dsc_config *dsc, u32 mode, u32 initial_lines) { @@ -52,89 +52,89 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, if (is_cmd_mode) initial_lines += 1; - slice_last_group_size = 3 - (dsc->drm->slice_width % 3); + slice_last_group_size = 3 - (dsc->slice_width % 3); data = (initial_lines << 20); data |= ((slice_last_group_size - 1) << 18); /* bpp is 6.4 format, 4 LSBs bits are for fractional part */ - data |= dsc->drm->bits_per_pixel << 12; - lsb = dsc->drm->bits_per_pixel % 4; - bpp = dsc->drm->bits_per_pixel / 4; + data |= dsc->bits_per_pixel << 12; + lsb = dsc->bits_per_pixel % 4; + bpp = dsc->bits_per_pixel / 4; bpp *= 4; bpp <<= 4; bpp |= lsb; data |= bpp << 8; - data |= (dsc->drm->block_pred_enable << 7); - data |= (dsc->drm->line_buf_depth << 3); - data |= (dsc->drm->simple_422 << 2); - data |= 
(dsc->drm->convert_rgb << 1); - data |= dsc->drm->bits_per_component; + data |= (dsc->block_pred_enable << 7); + data |= (dsc->line_buf_depth << 3); + data |= (dsc->simple_422 << 2); + data |= (dsc->convert_rgb << 1); + data |= dsc->bits_per_component; DPU_REG_WRITE(c, DSC_ENC, data); - data = dsc->drm->pic_width << 16; - data |= dsc->drm->pic_height; + data = dsc->pic_width << 16; + data |= dsc->pic_height; DPU_REG_WRITE(c, DSC_PICTURE, data); - data = dsc->drm->slice_width << 16; - data |= dsc->drm->slice_height; + data = dsc->slice_width << 16; + data |= dsc->slice_height; DPU_REG_WRITE(c, DSC_SLICE, data); - data = dsc->drm->slice_chunk_size << 16; + data = dsc->slice_chunk_size << 16; DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data); - data = dsc->drm->initial_dec_delay << 16; - data |= dsc->drm->initial_xmit_delay; + data = dsc->initial_dec_delay << 16; + data |= dsc->initial_xmit_delay; DPU_REG_WRITE(c, DSC_DELAY, data); - data = dsc->drm->initial_scale_value; + data = dsc->initial_scale_value; DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data); - data = dsc->drm->scale_decrement_interval; + data = dsc->scale_decrement_interval; DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data); - data = dsc->drm->scale_increment_interval; + data = dsc->scale_increment_interval; DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data); - data = dsc->drm->first_line_bpg_offset; + data = dsc->first_line_bpg_offset; DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data); - data = dsc->drm->nfl_bpg_offset << 16; - data |= dsc->drm->slice_bpg_offset; + data = dsc->nfl_bpg_offset << 16; + data |= dsc->slice_bpg_offset; DPU_REG_WRITE(c, DSC_BPG_OFFSET, data); - data = dsc->drm->initial_offset << 16; - data |= dsc->drm->final_offset; + data = dsc->initial_offset << 16; + data |= dsc->final_offset; DPU_REG_WRITE(c, DSC_DSC_OFFSET, data); - det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8); + det_thresh_flatness = 7 + 2 * (dsc->bits_per_component - 8); data = det_thresh_flatness << 10; - data |= dsc->drm->flatness_max_qp << 5; - data |= dsc->drm->flatness_min_qp; + data |= dsc->flatness_max_qp << 5; + data |= dsc->flatness_min_qp; DPU_REG_WRITE(c, DSC_FLATNESS, data); - data = dsc->drm->rc_model_size; + data = dsc->rc_model_size; DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data); - data = dsc->drm->rc_tgt_offset_low << 18; - data |= dsc->drm->rc_tgt_offset_high << 14; - data |= dsc->drm->rc_quant_incr_limit1 << 9; - data |= dsc->drm->rc_quant_incr_limit0 << 4; - data |= dsc->drm->rc_edge_factor; + data = dsc->rc_tgt_offset_low << 18; + data |= dsc->rc_tgt_offset_high << 14; + data |= dsc->rc_quant_incr_limit1 << 9; + data |= dsc->rc_quant_incr_limit0 << 4; + data |= dsc->rc_edge_factor; DPU_REG_WRITE(c, DSC_RC, data); } static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc, - struct msm_display_dsc_config *dsc) + struct drm_dsc_config *dsc) { - struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params; + struct drm_dsc_rc_range_parameters *rc = dsc->rc_range_params; struct dpu_hw_blk_reg_map *c = &hw_dsc->hw; u32 off; int i; off = DSC_RC_BUF_THRESH; for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) { - DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]); + DPU_REG_WRITE(c, off, dsc->rc_buf_thresh[i]); off += 4; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h index 45e4118f1fa2..c0b77fe1a696 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h @@ -31,7 +31,7 @@ struct dpu_hw_dsc_ops { * @initial_lines: amount of initial 
lines to be used */ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc, - struct msm_display_dsc_config *dsc, + struct drm_dsc_config *dsc, u32 mode, u32 initial_lines); @@ -41,7 +41,7 @@ struct dpu_hw_dsc_ops { * @dsc: panel dsc parameters */ void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc, - struct msm_display_dsc_config *dsc); + struct drm_dsc_config *dsc); }; struct dpu_hw_dsc { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h index 9f402be55fbf..d3b0ed0a9c6c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h @@ -273,11 +273,9 @@ enum dpu_wd_timer { }; enum dpu_vbif { - VBIF_0, - VBIF_1, + VBIF_RT, + VBIF_NRT, VBIF_MAX, - VBIF_RT = VBIF_0, - VBIF_NRT = VBIF_1 }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 102c21bb4192..691c471b08c2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -780,8 +780,7 @@ static const struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp, } struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, - void __iomem *addr, const struct dpu_mdss_cfg *catalog, - bool is_virtual_pipe) + void __iomem *addr, const struct dpu_mdss_cfg *catalog) { struct dpu_hw_pipe *hw_pipe; const struct dpu_sspp_cfg *cfg; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h index 78b1bc9e004f..0c95b7e64f6c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h @@ -377,11 +377,9 @@ struct dpu_kms; * @idx: Pipe index for which driver object is required * @addr: Mapped register io address of MDP * @catalog : Pointer to mdss catalog data - * @is_virtual_pipe: is this pipe virtual pipe */ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx, - void __iomem *addr, const struct dpu_mdss_cfg *catalog, - bool is_virtual_pipe); + void __iomem *addr, const struct dpu_mdss_cfg *catalog); /** * dpu_hw_sspp_destroy(): Destroys SSPP driver context diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index 008e1420e6e5..5e6e2626151e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -384,12 +384,9 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms) struct icc_path *path1; struct drm_device *dev = dpu_kms->dev; struct device *dpu_dev = dev->dev; - struct device *mdss_dev = dpu_dev->parent; - /* Interconnects are a part of MDSS device tree binding, not the - * MDP/DPU device. 
*/ - path0 = of_icc_get(mdss_dev, "mdp0-mem"); - path1 = of_icc_get(mdss_dev, "mdp1-mem"); + path0 = msm_icc_get(dpu_dev, "mdp0-mem"); + path1 = msm_icc_get(dpu_dev, "mdp1-mem"); if (IS_ERR_OR_NULL(path0)) return PTR_ERR_OR_ZERO(path0); @@ -782,7 +779,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms) catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)); plane = dpu_plane_init(dev, catalog->sspp[i].id, type, - (1UL << max_crtc_count) - 1, 0); + (1UL << max_crtc_count) - 1); if (IS_ERR(plane)) { DPU_ERROR("dpu_plane_init failed\n"); ret = PTR_ERR(plane); @@ -826,12 +823,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms) _dpu_kms_mmu_destroy(dpu_kms); if (dpu_kms->catalog) { - for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { - u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - - if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) { - dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]); - dpu_kms->hw_vbif[vbif_idx] = NULL; + for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { + if (dpu_kms->hw_vbif[i]) { + dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]); + dpu_kms->hw_vbif[i] = NULL; } } } @@ -902,12 +897,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k int i; struct dpu_kms *dpu_kms; const struct dpu_mdss_cfg *cat; - struct dpu_hw_mdp *top; dpu_kms = to_dpu_kms(kms); cat = dpu_kms->catalog; - top = dpu_kms->hw_mdp; pm_runtime_get_sync(&dpu_kms->pdev->dev); @@ -1113,12 +1106,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms) for (i = 0; i < dpu_kms->catalog->vbif_count; i++) { u32 vbif_idx = dpu_kms->catalog->vbif[i].id; - dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx, + dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx, dpu_kms->vbif[vbif_idx], dpu_kms->catalog); - if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) { + if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) { rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]); - if (!dpu_kms->hw_vbif[vbif_idx]) - rc = -EINVAL; DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc); dpu_kms->hw_vbif[vbif_idx] = NULL; goto power_error; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index a617a3d8b1bc..658005f609f4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -91,7 +91,7 @@ enum dpu_plane_qos { /* * struct dpu_plane - local dpu plane structure * @aspace: address space pointer - * @mplane_list: List of multirect planes of the same pipe + * @csc_ptr: Points to dpu_csc_cfg structure to use for current * @catalog: Points to dpu catalog structure * @revalidate: force revalidation of all the plane properties */ @@ -106,8 +106,6 @@ struct dpu_plane { uint32_t color_fill; bool is_error; bool is_rt_pipe; - bool is_virtual; - struct list_head mplane_list; const struct dpu_mdss_cfg *catalog; }; @@ -225,7 +223,7 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg static int _dpu_plane_calc_fill_level(struct drm_plane *plane, const struct dpu_format *fmt, u32 src_width) { - struct dpu_plane *pdpu, *tmp; + struct dpu_plane *pdpu; struct dpu_plane_state *pstate; u32 fixed_buff_size; u32 total_fl; @@ -239,19 +237,7 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane, pstate = to_dpu_plane_state(plane->state); fixed_buff_size = pdpu->catalog->caps->pixel_ram_size; - list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) { - u32 tmp_width; - - if (!tmp->base.state->visible) - continue; - tmp_width = drm_rect_width(&tmp->base.state->src) >> 16; - DPU_DEBUG("plane%d/%d 
src_width:%d/%d\n", - pdpu->base.base.id, tmp->base.base.id, - src_width, - tmp_width); - src_width = max_t(u32, src_width, - tmp_width); - } + /* FIXME: in multirect case account for the src_width of all the planes */ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) { if (fmt->chroma_sample == DPU_CHROMA_420) { @@ -854,13 +840,8 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane) } done: - if (dpu_plane[R0]->is_virtual) { - pstate[R0]->multirect_index = DPU_SSPP_RECT_1; - pstate[R1]->multirect_index = DPU_SSPP_RECT_0; - } else { - pstate[R0]->multirect_index = DPU_SSPP_RECT_0; - pstate[R1]->multirect_index = DPU_SSPP_RECT_1; - } + pstate[R0]->multirect_index = DPU_SSPP_RECT_0; + pstate[R1]->multirect_index = DPU_SSPP_RECT_1; DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n", pstate[R0]->multirect_mode, pstate[R0]->multirect_index); @@ -869,18 +850,6 @@ done: return 0; } -/** - * dpu_plane_get_ctl_flush - get control flush for the given plane - * @plane: Pointer to drm plane structure - * @ctl: Pointer to hardware control driver - * @flush_sspp: Pointer to sspp flush control word - */ -void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, - u32 *flush_sspp) -{ - *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane)); -} - static int dpu_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -1266,19 +1235,13 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) static void _dpu_plane_atomic_disable(struct drm_plane *plane) { - struct dpu_plane *pdpu = to_dpu_plane(plane); struct drm_plane_state *state = plane->state; struct dpu_plane_state *pstate = to_dpu_plane_state(state); - trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane), + trace_dpu_plane_disable(DRMID(plane), false, pstate->multirect_mode); pstate->pending = true; - - if (is_dpu_plane_virtual(plane) && - pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect) - pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw, - DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE); } static void dpu_plane_atomic_update(struct drm_plane *plane, @@ -1493,22 +1456,16 @@ enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane) return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE; } -bool is_dpu_plane_virtual(struct drm_plane *plane) -{ - return plane ? 
to_dpu_plane(plane)->is_virtual : false; -} - /* initialize plane */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, - unsigned long possible_crtcs, u32 master_plane_id) + unsigned long possible_crtcs) { - struct drm_plane *plane = NULL, *master_plane = NULL; + struct drm_plane *plane = NULL; const uint32_t *format_list; struct dpu_plane *pdpu; struct msm_drm_private *priv = dev->dev_private; struct dpu_kms *kms = to_dpu_kms(priv->kms); - int zpos_max = DPU_ZPOS_MAX; uint32_t num_formats; uint32_t supported_rotations; int ret = -EINVAL; @@ -1524,18 +1481,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, /* cache local stuff for later */ plane = &pdpu->base; pdpu->pipe = pipe; - pdpu->is_virtual = (master_plane_id != 0); - INIT_LIST_HEAD(&pdpu->mplane_list); - master_plane = drm_plane_find(dev, NULL, master_plane_id); - if (master_plane) { - struct dpu_plane *mpdpu = to_dpu_plane(master_plane); - - list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list); - } /* initialize underlying h/w driver */ - pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog, - master_plane_id != 0); + pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog); if (IS_ERR(pdpu->pipe_hw)) { DPU_ERROR("[%u]SSPP init failed\n", pipe); ret = PTR_ERR(pdpu->pipe_hw); @@ -1545,14 +1493,8 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, goto clean_sspp; } - if (pdpu->is_virtual) { - format_list = pdpu->pipe_hw->cap->sblk->virt_format_list; - num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats; - } - else { - format_list = pdpu->pipe_hw->cap->sblk->format_list; - num_formats = pdpu->pipe_hw->cap->sblk->num_formats; - } + format_list = pdpu->pipe_hw->cap->sblk->format_list; + num_formats = pdpu->pipe_hw->cap->sblk->num_formats; ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, format_list, num_formats, @@ -1562,14 +1504,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, pdpu->catalog = kms->catalog; - if (kms->catalog->mixer_count && - kms->catalog->mixer[0].sblk->maxblendstages) { - zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1; - if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1) - zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1; - } - - ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max); + ret = drm_plane_create_zpos_property(plane, 0, 0, DPU_ZPOS_MAX); if (ret) DPU_ERROR("failed to install zpos property, rc = %d\n", ret); @@ -1594,15 +1529,14 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev, mutex_init(&pdpu->lock); - DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name, - pipe, plane->base.id, master_plane_id); + DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name, + pipe, plane->base.id); return plane; clean_sspp: if (pdpu && pdpu->pipe_hw) dpu_hw_sspp_destroy(pdpu->pipe_hw); clean_plane: - list_del(&pdpu->mplane_list); kfree(pdpu); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h index e1463107a6fc..b7b1b05199c2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h @@ -65,23 +65,6 @@ struct dpu_multirect_plane_states { enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane); /** - * is_dpu_plane_virtual - check for virtual plane - * @plane: Pointer to DRM plane object - * returns: true - if the plane is virtual - * false - if the plane is primary - */ -bool is_dpu_plane_virtual(struct drm_plane *plane); - -/** - * dpu_plane_get_ctl_flush - 
get control flush mask - * @plane: Pointer to DRM plane object - * @ctl: Pointer to control hardware - * @flush_sspp: Pointer to sspp flush control word - */ -void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl, - u32 *flush_sspp); - -/** * dpu_plane_flush - final plane operations before commit flush * @plane: Pointer to drm plane structure */ @@ -99,14 +82,11 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error); * @pipe: dpu hardware pipe identifier * @type: Plane type - PRIMARY/OVERLAY/CURSOR * @possible_crtcs: bitmask of crtc that can be attached to the given pipe - * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for - * a regular plane initialization. A non-zero primary plane - * id will be passed for a virtual pipe initialization. * */ struct drm_plane *dpu_plane_init(struct drm_device *dev, uint32_t pipe, enum drm_plane_type type, - unsigned long possible_crtcs, u32 master_plane_id); + unsigned long possible_crtcs); /** * dpu_plane_validate_multirecti_v2 - validate the multirect planes diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c index 21d20373eb8b..1305e250b71e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c @@ -11,6 +11,26 @@ #include "dpu_hw_vbif.h" #include "dpu_trace.h" +static struct dpu_hw_vbif *dpu_get_vbif(struct dpu_kms *dpu_kms, enum dpu_vbif vbif_idx) +{ + if (vbif_idx < ARRAY_SIZE(dpu_kms->hw_vbif)) + return dpu_kms->hw_vbif[vbif_idx]; + + return NULL; +} + +static const char *dpu_vbif_name(enum dpu_vbif idx) +{ + switch (idx) { + case VBIF_RT: + return "VBIF_RT"; + case VBIF_NRT: + return "VBIF_NRT"; + default: + return "??"; + } +} + /** * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt * @vbif: Pointer to hardware vbif driver @@ -42,12 +62,12 @@ static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id) if (!status) { rc = -ETIMEDOUT; - DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n", - vbif->idx - VBIF_0, xin_id); + DPU_ERROR("%s client %d not halting. 
TIMEDOUT.\n", + dpu_vbif_name(vbif->idx), xin_id); } else { rc = 0; - DRM_DEBUG_ATOMIC("VBIF %d client %d is halted\n", - vbif->idx - VBIF_0, xin_id); + DRM_DEBUG_ATOMIC("%s client %d is halted\n", + dpu_vbif_name(vbif->idx), xin_id); } return rc; @@ -87,8 +107,8 @@ static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif, } } - DRM_DEBUG_ATOMIC("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n", - vbif->idx - VBIF_0, params->xin_id, + DRM_DEBUG_ATOMIC("%s xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n", + dpu_vbif_name(vbif->idx), params->xin_id, params->width, params->height, params->frame_rate, pps, *ot_lim); } @@ -133,8 +153,8 @@ static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif, } exit: - DRM_DEBUG_ATOMIC("vbif:%d xin:%d ot_lim:%d\n", - vbif->idx - VBIF_0, params->xin_id, ot_lim); + DRM_DEBUG_ATOMIC("%s xin:%d ot_lim:%d\n", + dpu_vbif_name(vbif->idx), params->xin_id, ot_lim); return ot_lim; } @@ -148,20 +168,15 @@ exit: void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, struct dpu_vbif_set_ot_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; u32 ot_lim; - int ret, i; + int ret; mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) - vbif = dpu_kms->hw_vbif[i]; - } - + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !mdp) { DRM_DEBUG_ATOMIC("invalid arguments vbif %d mdp %d\n", vbif != NULL, mdp != NULL); @@ -204,7 +219,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms, void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, struct dpu_vbif_set_qos_params *params) { - struct dpu_hw_vbif *vbif = NULL; + struct dpu_hw_vbif *vbif; struct dpu_hw_mdp *mdp; bool forced_on = false; const struct dpu_vbif_qos_tbl *qos_tbl; @@ -216,13 +231,7 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, } mdp = dpu_kms->hw_mdp; - for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) { - if (dpu_kms->hw_vbif[i] && - dpu_kms->hw_vbif[i]->idx == params->vbif_idx) { - vbif = dpu_kms->hw_vbif[i]; - break; - } - } + vbif = dpu_get_vbif(dpu_kms, params->vbif_idx); if (!vbif || !vbif->cap) { DPU_ERROR("invalid vbif %d\n", params->vbif_idx); @@ -245,8 +254,8 @@ void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms, forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true); for (i = 0; i < qos_tbl->npriority_lvl; i++) { - DRM_DEBUG_ATOMIC("vbif:%d xin:%d lvl:%d/%d\n", - params->vbif_idx, params->xin_id, i, + DRM_DEBUG_ATOMIC("%s xin:%d lvl:%d/%d\n", + dpu_vbif_name(params->vbif_idx), params->xin_id, i, qos_tbl->priority_lvl[i]); vbif->ops.set_qos_remap(vbif, params->xin_id, i, qos_tbl->priority_lvl[i]); @@ -266,8 +275,8 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms) if (vbif && vbif->ops.clear_errors) { vbif->ops.clear_errors(vbif, &pnd, &src); if (pnd || src) { - DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n", - vbif->idx - VBIF_0, pnd, src); + DRM_DEBUG_KMS("%s: pnd 0x%X, src 0x%X\n", + dpu_vbif_name(vbif->idx), pnd, src); } } } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index d2a48caf9d27..b0d21838a134 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -902,12 +902,9 @@ fail: static int mdp5_setup_interconnect(struct platform_device *pdev) { - /* Interconnects are a part of MDSS device tree binding, not the - * MDP5 device. 
*/ - struct device *mdss_dev = pdev->dev.parent; - struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem"); - struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem"); - struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem"); + struct icc_path *path0 = msm_icc_get(&pdev->dev, "mdp0-mem"); + struct icc_path *path1 = msm_icc_get(&pdev->dev, "mdp1-mem"); + struct icc_path *path_rot = msm_icc_get(&pdev->dev, "rotator-mem"); if (IS_ERR(path0)) return PTR_ERR(path0); diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index 7257515871a9..676279d0ca8d 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -431,7 +431,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, if (rate == link_rate_hbr3) pixel_div = 6; - else if (rate == 1620000 || rate == 270000) + else if (rate == 162000 || rate == 270000) pixel_div = 2; else if (rate == link_rate_hbr2) pixel_div = 4; diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index ab6aa13b1639..3854c9f1f7e9 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, if (ret) return ret; - dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN); + dp_ctrl_train_pattern_set(ctrl, pattern); for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); @@ -1238,8 +1238,6 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl, return -ETIMEDOUT; } -static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl); - static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, int *training_step) { @@ -1358,25 +1356,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) if (ret) DRM_ERROR("Unable to start link clocks. ret=%d\n", ret); - drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n", - ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); - - return ret; -} - -static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl) -{ - int ret = 0; - - dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", - ctrl->dp_ctrl.pixel_rate * 1000); - - ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); - if (ret) - DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret); - - drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n", - ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate); + drm_dbg_dp(ctrl->drm_dev, "link rate=%d\n", ctrl->link->link_params.rate); return ret; } @@ -1520,8 +1500,6 @@ static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl) ctrl->link->phy_params.p_level = 0; ctrl->link->phy_params.v_level = 0; - ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; - ret = dp_ctrl_setup_main_link(ctrl, &training_step); if (ret) goto end; @@ -1535,38 +1513,6 @@ end: return ret; } -static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl); - -static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) -{ - int ret = 0; - - if (!ctrl->link->phy_params.phy_test_pattern_sel) { - drm_dbg_dp(ctrl->drm_dev, - "no test pattern selected by sink\n"); - return ret; - } - - /* - * The global reset will need DP link related clocks to be - * running. Add the global reset just before disabling the - * link clocks and core clocks. 
- */ - ret = dp_ctrl_off(&ctrl->dp_ctrl); - if (ret) { - DRM_ERROR("failed to disable DP controller\n"); - return ret; - } - - ret = dp_ctrl_on_link(&ctrl->dp_ctrl); - if (!ret) - ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl); - else - DRM_ERROR("failed to enable DP link controller\n"); - - return ret; -} - static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) { bool success = false; @@ -1619,6 +1565,48 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl) return success; } +static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) +{ + int ret; + unsigned long pixel_rate; + + if (!ctrl->link->phy_params.phy_test_pattern_sel) { + drm_dbg_dp(ctrl->drm_dev, + "no test pattern selected by sink\n"); + return 0; + } + + /* + * The global reset will need DP link related clocks to be + * running. Add the global reset just before disabling the + * link clocks and core clocks. + */ + ret = dp_ctrl_off(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to disable DP controller\n"); + return ret; + } + + ret = dp_ctrl_on_link(&ctrl->dp_ctrl); + if (ret) { + DRM_ERROR("failed to enable DP link controller\n"); + return ret; + } + + pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); + return ret; + } + + dp_ctrl_send_phy_test_pattern(ctrl); + + return 0; +} + void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl) { struct dp_ctrl_private *ctrl; @@ -1689,11 +1677,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) { int rc = 0; struct dp_ctrl_private *ctrl; - u32 rate = 0; + u32 rate; int link_train_max_retries = 5; u32 const phy_cts_pixel_clk_khz = 148500; u8 link_status[DP_LINK_STATUS_SIZE]; unsigned int training_step; + unsigned long pixel_rate; if (!dp_ctrl) return -EINVAL; @@ -1701,25 +1690,24 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl) ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); rate = ctrl->panel->link_info.rate; + pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; dp_power_clk_enable(ctrl->power, DP_CORE_PM, true); if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { drm_dbg_dp(ctrl->drm_dev, "using phy test link parameters\n"); - if (!ctrl->panel->dp_mode.drm_mode.clock) - ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz; + if (!pixel_rate) + pixel_rate = phy_cts_pixel_clk_khz; } else { ctrl->link->link_params.rate = rate; ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes; - ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; } - drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n", + drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes, - ctrl->dp_ctrl.pixel_rate); - + pixel_rate); rc = dp_ctrl_enable_mainlink_clocks(ctrl); if (rc) @@ -1816,31 +1804,12 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) return dp_ctrl_setup_main_link(ctrl, &training_step); } -static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl) -{ - int ret; - struct dp_ctrl_private *ctrl; - - ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); - - ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; - - ret = dp_ctrl_enable_stream_clocks(ctrl); - if (ret) { - DRM_ERROR("Failed to start pixel clocks. 
ret=%d\n", ret); - return ret; - } - - dp_ctrl_send_phy_test_pattern(ctrl); - - return 0; -} - int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; struct dp_ctrl_private *ctrl; + unsigned long pixel_rate; unsigned long pixel_rate_orig; if (!dp_ctrl) @@ -1848,15 +1817,14 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); - ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + pixel_rate = pixel_rate_orig = ctrl->panel->dp_mode.drm_mode.clock; - pixel_rate_orig = ctrl->dp_ctrl.pixel_rate; if (dp_ctrl->wide_bus_en) - ctrl->dp_ctrl.pixel_rate >>= 1; + pixel_rate >>= 1; - drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n", + drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%lu\n", ctrl->link->link_params.rate, - ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate); + ctrl->link->link_params.num_lanes, pixel_rate); if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */ ret = dp_ctrl_enable_mainlink_clocks(ctrl); @@ -1866,9 +1834,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) } } - ret = dp_ctrl_enable_stream_clocks(ctrl); + dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel", pixel_rate * 1000); + + ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true); if (ret) { - DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret); + DRM_ERROR("Unable to start pixel clocks. ret=%d\n", ret); goto end; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index b563e2e3bfe5..9f29734af81c 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -16,7 +16,6 @@ struct dp_ctrl { bool orientation; atomic_t aborted; - u32 pixel_rate; bool wide_bus_en; }; diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 36f0af02749f..36bb6191d2f0 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -786,7 +786,7 @@ static int dp_link_process_link_training_request(struct dp_link_private *link) link->request.test_lane_count); link->dp_link.link_params.num_lanes = link->request.test_lane_count; - link->dp_link.link_params.rate = + link->dp_link.link_params.rate = drm_dp_bw_code_to_link_rate(link->request.test_link_rate); return 0; @@ -965,8 +965,7 @@ static int dp_link_process_link_status_update(struct dp_link_private *link) if (channel_eq_done && clock_recovery_done) return -EINVAL; - - return 0; + return 0; } /** diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 1625328fa430..39bbabb5daf6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -6,14 +6,6 @@ #include "dsi.h" #include "dsi_cfg.h" -struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) -{ - if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) - return NULL; - - return msm_dsi->encoder; -} - bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) { unsigned long host_flags = msm_dsi_host_get_mode_flags(msm_dsi->host); @@ -21,7 +13,7 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi) return !(host_flags & MIPI_DSI_MODE_VIDEO); } -struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) +struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) { return msm_dsi_host_get_dsc_config(msm_dsi->host); } @@ -220,7 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, 
struct drm_encoder *encoder) { struct msm_drm_private *priv; - struct drm_bridge *ext_bridge; int ret; if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev)) @@ -254,26 +245,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - /* - * check if the dsi encoder output is connected to a panel or an - * external bridge. We create a connector only if we're connected to a - * drm_panel device. When we're connected to an external bridge, we - * assume that the drm_bridge driver will create the connector itself. - */ - ext_bridge = msm_dsi_host_get_bridge(msm_dsi->host); - - if (ext_bridge) - msm_dsi->connector = - msm_dsi_manager_ext_bridge_init(msm_dsi->id); - else - msm_dsi->connector = - msm_dsi_manager_connector_init(msm_dsi->id); - - if (IS_ERR(msm_dsi->connector)) { - ret = PTR_ERR(msm_dsi->connector); + ret = msm_dsi_manager_ext_bridge_init(msm_dsi->id); + if (ret) { DRM_DEV_ERROR(dev->dev, "failed to create dsi connector: %d\n", ret); - msm_dsi->connector = NULL; goto fail; } @@ -287,12 +262,6 @@ fail: msm_dsi->bridge = NULL; } - /* don't destroy connector if we didn't make it */ - if (msm_dsi->connector && !msm_dsi->external_bridge) - msm_dsi->connector->funcs->destroy(msm_dsi->connector); - - msm_dsi->connector = NULL; - return ret; } diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 580a1e6358bf..2a96b4fe7839 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -12,7 +12,6 @@ #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> -#include <drm/drm_panel.h> #include "msm_drv.h" #include "disp/msm_disp_snapshot.h" @@ -30,27 +29,12 @@ enum msm_dsi_phy_usecase { MSM_DSI_PHY_SLAVE, }; -#define DSI_DEV_REGULATOR_MAX 8 #define DSI_BUS_CLK_MAX 4 -/* Regulators for DSI devices */ -struct dsi_reg_entry { - char name[32]; - int enable_load; - int disable_load; -}; - -struct dsi_reg_config { - int num; - struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX]; -}; - struct msm_dsi { struct drm_device *dev; struct platform_device *pdev; - /* connector managed by us when we're connected to a drm_panel */ - struct drm_connector *connector; /* internal dsi bridge attached to MDP interface */ struct drm_bridge *bridge; @@ -58,10 +42,8 @@ struct msm_dsi { struct msm_dsi_phy *phy; /* - * panel/external_bridge connected to dsi bridge output, only one of the - * two can be valid at a time + * external_bridge connected to dsi bridge output */ - struct drm_panel *panel; struct drm_bridge *external_bridge; struct device *phy_dev; @@ -76,8 +58,7 @@ struct msm_dsi { /* dsi manager */ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); -struct drm_connector *msm_dsi_manager_connector_init(u8 id); -struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); +int msm_dsi_manager_ext_bridge_init(u8 id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); @@ -87,11 +68,9 @@ void msm_dsi_manager_tpg_enable(void); /* msm dsi */ static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi) { - return msm_dsi->panel || msm_dsi->external_bridge; + return msm_dsi->external_bridge; } -struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi); - /* dsi host */ struct msm_dsi_host; int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, @@ -116,9 +95,7 @@ int 
msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, const struct drm_display_mode *mode); enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host, const struct drm_display_mode *mode); -struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host); unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); -struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); int msm_dsi_host_register(struct mipi_dsi_host *host); void msm_dsi_host_unregister(struct mipi_dsi_host *host); void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, @@ -154,7 +131,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi); int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi); void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host); void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host); -struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host); +struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host); /* dsi phy */ struct msm_dsi_phy; diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 2c23324a2296..7e97c239ed48 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -9,16 +9,16 @@ static const char * const dsi_v2_bus_clk_names[] = { "core_mmss", "iface", "bus", }; +static const struct regulator_bulk_data apq8064_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "avdd", .init_load_uA = 10000 }, /* 3.0 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + static const struct msm_dsi_config apq8064_dsi_cfg = { .io_offset = 0, - .reg_cfg = { - .num = 3, - .regs = { - {"vdda", 100000, 100}, /* 1.2 V */ - {"avdd", 10000, 100}, /* 3.0 V */ - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = apq8064_dsi_regulators, + .num_regulators = ARRAY_SIZE(apq8064_dsi_regulators), .bus_clk_names = dsi_v2_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names), .io_start = { 0x4700000, 0x5800000 }, @@ -29,16 +29,16 @@ static const char * const dsi_6g_bus_clk_names[] = { "mdp_core", "iface", "bus", "core_mmss", }; +static const struct regulator_bulk_data msm8974_apq8084_regulators[] = { + { .supply = "vdd", .init_load_uA = 150000 }, /* 3.0 V */ + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 3, - .regs = { - {"vdd", 150000, 100}, /* 3.0 V */ - {"vdda", 100000, 100}, /* 1.2 V */ - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = msm8974_apq8084_regulators, + .num_regulators = ARRAY_SIZE(msm8974_apq8084_regulators), .bus_clk_names = dsi_6g_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), .io_start = { 0xfd922800, 0xfd922b00 }, @@ -49,15 +49,15 @@ static const char * const dsi_8916_bus_clk_names[] = { "mdp_core", "iface", "bus", }; +static const struct regulator_bulk_data msm8916_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + static const struct msm_dsi_config msm8916_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 2, - .regs = { - {"vdda", 100000, 100}, /* 1.2 V */ - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = 
msm8916_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8916_dsi_regulators), .bus_clk_names = dsi_8916_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names), .io_start = { 0x1a98000 }, @@ -68,34 +68,34 @@ static const char * const dsi_8976_bus_clk_names[] = { "mdp_core", "iface", "bus", }; +static const struct regulator_bulk_data msm8976_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.2 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + static const struct msm_dsi_config msm8976_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 2, - .regs = { - {"vdda", 100000, 100}, /* 1.2 V */ - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = msm8976_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8976_dsi_regulators), .bus_clk_names = dsi_8976_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_8976_bus_clk_names), .io_start = { 0x1a94000, 0x1a96000 }, .num_dsi = 2, }; +static const struct regulator_bulk_data msm8994_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 100000 }, /* 1.25 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */ + { .supply = "vdd", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "lab_reg", .init_load_uA = -1 }, + { .supply = "ibb_reg", .init_load_uA = -1 }, +}; + static const struct msm_dsi_config msm8994_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 6, - .regs = { - {"vdda", 100000, 100}, /* 1.25 V */ - {"vddio", 100000, 100}, /* 1.8 V */ - {"vcca", 10000, 100}, /* 1.0 V */ - {"vdd", 100000, 100}, /* 1.8 V */ - {"lab_reg", -1, -1}, - {"ibb_reg", -1, -1}, - }, - }, + .regulator_data = msm8994_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8994_dsi_regulators), .bus_clk_names = dsi_6g_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names), .io_start = { 0xfd998000, 0xfd9a0000 }, @@ -106,16 +106,16 @@ static const char * const dsi_8996_bus_clk_names[] = { "mdp_core", "iface", "bus", "core_mmss", }; +static const struct regulator_bulk_data msm8996_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 18160 }, /* 1.25 V */ + { .supply = "vcca", .init_load_uA = 17000 }, /* 0.925 V */ + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + static const struct msm_dsi_config msm8996_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 2, - .regs = { - {"vdda", 18160, 1 }, /* 1.25 V */ - {"vcca", 17000, 32 }, /* 0.925 V */ - {"vddio", 100000, 100 },/* 1.8 V */ - }, - }, + .regulator_data = msm8996_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8996_dsi_regulators), .bus_clk_names = dsi_8996_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names), .io_start = { 0x994000, 0x996000 }, @@ -126,15 +126,15 @@ static const char * const dsi_msm8998_bus_clk_names[] = { "iface", "bus", "core", }; +static const struct regulator_bulk_data msm8998_dsi_regulators[] = { + { .supply = "vdd", .init_load_uA = 367000 }, /* 0.9 V */ + { .supply = "vdda", .init_load_uA = 62800 }, /* 1.2 V */ +}; + static const struct msm_dsi_config msm8998_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 2, - .regs = { - {"vdd", 367000, 16 }, /* 0.9 V */ - {"vdda", 62800, 2 }, /* 1.2 V */ - }, - }, + .regulator_data = msm8998_dsi_regulators, + .num_regulators = ARRAY_SIZE(msm8998_dsi_regulators), .bus_clk_names = dsi_msm8998_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_msm8998_bus_clk_names), .io_start = { 0xc994000, 0xc996000 }, @@ -145,14 +145,14 @@ 
static const char * const dsi_sdm660_bus_clk_names[] = { "iface", "bus", "core", "core_mmss", }; +static const struct regulator_bulk_data sdm660_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 12560 }, /* 1.2 V */ +}; + static const struct msm_dsi_config sdm660_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 2, - .regs = { - {"vdda", 12560, 4 }, /* 1.2 V */ - }, - }, + .regulator_data = sdm660_dsi_regulators, + .num_regulators = ARRAY_SIZE(sdm660_dsi_regulators), .bus_clk_names = dsi_sdm660_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_sdm660_bus_clk_names), .io_start = { 0xc994000, 0xc996000 }, @@ -167,28 +167,28 @@ static const char * const dsi_sc7180_bus_clk_names[] = { "iface", "bus", }; +static const struct regulator_bulk_data sdm845_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + static const struct msm_dsi_config sdm845_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 1, - .regs = { - {"vdda", 21800, 4 }, /* 1.2 V */ - }, - }, + .regulator_data = sdm845_dsi_regulators, + .num_regulators = ARRAY_SIZE(sdm845_dsi_regulators), .bus_clk_names = dsi_sdm845_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names), .io_start = { 0xae94000, 0xae96000 }, .num_dsi = 2, }; +static const struct regulator_bulk_data sc7180_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + static const struct msm_dsi_config sc7180_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 1, - .regs = { - {"vdda", 21800, 4 }, /* 1.2 V */ - }, - }, + .regulator_data = sc7180_dsi_regulators, + .num_regulators = ARRAY_SIZE(sc7180_dsi_regulators), .bus_clk_names = dsi_sc7180_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_sc7180_bus_clk_names), .io_start = { 0xae94000 }, @@ -199,14 +199,14 @@ static const char * const dsi_sc7280_bus_clk_names[] = { "iface", "bus", }; +static const struct regulator_bulk_data sc7280_dsi_regulators[] = { + { .supply = "vdda", .init_load_uA = 8350 }, /* 1.2 V */ +}; + static const struct msm_dsi_config sc7280_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 1, - .regs = { - {"vdda", 8350, 0 }, /* 1.2 V */ - }, - }, + .regulator_data = sc7280_dsi_regulators, + .num_regulators = ARRAY_SIZE(sc7280_dsi_regulators), .bus_clk_names = dsi_sc7280_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names), .io_start = { 0xae94000 }, @@ -217,14 +217,14 @@ static const char * const dsi_qcm2290_bus_clk_names[] = { "iface", "bus", }; +static const struct regulator_bulk_data qcm2290_dsi_cfg_regulators[] = { + { .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */ +}; + static const struct msm_dsi_config qcm2290_dsi_cfg = { .io_offset = DSI_6G_REG_SHIFT, - .reg_cfg = { - .num = 1, - .regs = { - {"vdda", 21800, 4 }, /* 1.2 V */ - }, - }, + .regulator_data = qcm2290_dsi_cfg_regulators, + .num_regulators = ARRAY_SIZE(qcm2290_dsi_cfg_regulators), .bus_clk_names = dsi_qcm2290_bus_clk_names, .num_bus_clks = ARRAY_SIZE(dsi_qcm2290_bus_clk_names), .io_start = { 0x5e94000 }, diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index fe54a999968b..8f04e685a74e 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -32,7 +32,8 @@ struct msm_dsi_config { u32 io_offset; - struct dsi_reg_config reg_cfg; + const struct regulator_bulk_data *regulator_data; + int num_regulators; const char * const *bus_clk_names; const int num_bus_clks; const resource_size_t io_start[DSI_MAX]; diff --git 
a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index a34078497af1..7fbf391c024f 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -33,7 +33,7 @@ #define DSI_RESET_TOGGLE_DELAY_MS 20 -static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc); +static int dsi_populate_dsc_params(struct drm_dsc_config *dsc); static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) { @@ -108,7 +108,7 @@ struct msm_dsi_host { void __iomem *ctrl_base; phys_addr_t ctrl_size; - struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; + struct regulator_bulk_data *supplies; int num_bus_clks; struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX]; @@ -144,7 +144,6 @@ struct msm_dsi_host { u32 err_work_state; struct work_struct err_work; - struct work_struct hpd_work; struct workqueue_struct *workqueue; /* DSI 6G TX buffer*/ @@ -161,10 +160,9 @@ struct msm_dsi_host { struct regmap *sfpb; struct drm_display_mode *mode; - struct msm_display_dsc_config *dsc; + struct drm_dsc_config *dsc; /* connected device info */ - struct device_node *device_node; unsigned int channel; unsigned int lanes; enum mipi_dsi_pixel_format format; @@ -205,9 +203,6 @@ static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) msm_writel(data, msm_host->ctrl_base + reg); } -static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); -static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); - static const struct msm_dsi_cfg_handler *dsi_get_config( struct msm_dsi_host *msm_host) { @@ -258,76 +253,6 @@ static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) return container_of(host, struct msm_dsi_host, base); } -static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) -{ - struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; - int num = msm_host->cfg_hnd->cfg->reg_cfg.num; - int i; - - DBG(""); - for (i = num - 1; i >= 0; i--) - if (regs[i].disable_load >= 0) - regulator_set_load(s[i].consumer, - regs[i].disable_load); - - regulator_bulk_disable(num, s); -} - -static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) -{ - struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; - int num = msm_host->cfg_hnd->cfg->reg_cfg.num; - int ret, i; - - DBG(""); - for (i = 0; i < num; i++) { - if (regs[i].enable_load >= 0) { - ret = regulator_set_load(s[i].consumer, - regs[i].enable_load); - if (ret < 0) { - pr_err("regulator %d set op mode failed, %d\n", - i, ret); - goto fail; - } - } - } - - ret = regulator_bulk_enable(num, s); - if (ret < 0) { - pr_err("regulator enable failed, %d\n", ret); - goto fail; - } - - return 0; - -fail: - for (i--; i >= 0; i--) - regulator_set_load(s[i].consumer, regs[i].disable_load); - return ret; -} - -static int dsi_regulator_init(struct msm_dsi_host *msm_host) -{ - struct regulator_bulk_data *s = msm_host->supplies; - const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; - int num = msm_host->cfg_hnd->cfg->reg_cfg.num; - int i, ret; - - for (i = 0; i < num; i++) - s[i].supply = regs[i].name; - - ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s); - if (ret < 0) { - pr_err("%s: failed to init regulator, ret=%d\n", - __func__, ret); - return ret; - } - - return 0; -} - int dsi_clk_init_v2(struct msm_dsi_host *msm_host) { struct platform_device *pdev = msm_host->pdev; @@ 
-916,7 +841,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) { - struct msm_display_dsc_config *dsc = msm_host->dsc; + struct drm_dsc_config *dsc = msm_host->dsc; u32 reg, intf_width, reg_ctrl, reg_ctrl2; u32 slice_per_intf, total_bytes_per_intf; u32 pkt_per_line; @@ -927,24 +852,24 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod * compress mode registers */ intf_width = hdisplay; - slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width); + slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width); /* If slice_per_pkt is greater than slice_per_intf * then default to 1. This can happen during partial * update. */ - if (slice_per_intf > dsc->drm->slice_count) - dsc->drm->slice_count = 1; + if (slice_per_intf > dsc->slice_count) + dsc->slice_count = 1; - slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width); - bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8); + slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width); + bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bits_per_pixel, 8); - dsc->drm->slice_chunk_size = bytes_in_slice; + dsc->slice_chunk_size = bytes_in_slice; total_bytes_per_intf = bytes_in_slice * slice_per_intf; eol_byte_num = total_bytes_per_intf % 3; - pkt_per_line = slice_per_intf / dsc->drm->slice_count; + pkt_per_line = slice_per_intf / dsc->slice_count; if (is_cmd_mode) /* packet data type */ reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE); @@ -1009,7 +934,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) } if (msm_host->dsc) { - struct msm_display_dsc_config *dsc = msm_host->dsc; + struct drm_dsc_config *dsc = msm_host->dsc; /* update dsc params with timing params */ if (!dsc || !mode->hdisplay || !mode->vdisplay) { @@ -1018,9 +943,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) return; } - dsc->drm->pic_width = mode->hdisplay; - dsc->drm->pic_height = mode->vdisplay; - DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height); + dsc->pic_width = mode->hdisplay; + dsc->pic_height = mode->vdisplay; + DBG("Mode %dx%d\n", dsc->pic_width, dsc->pic_height); /* we do the calculations for dsc parameters here so that * panel can use these parameters @@ -1500,14 +1425,6 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, return len; } -static void dsi_hpd_worker(struct work_struct *work) -{ - struct msm_dsi_host *msm_host = - container_of(work, struct msm_dsi_host, hpd_work); - - drm_helper_hpd_irq_event(msm_host->dev); -} - static void dsi_err_worker(struct work_struct *work) { struct msm_dsi_host *msm_host = @@ -1686,6 +1603,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host, msm_host->lanes = dsi->lanes; msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; + if (dsi->dsc) + msm_host->dsc = dsi->dsc; /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); @@ -1697,8 +1616,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host, return ret; DBG("id=%d", msm_host->id); - if (msm_host->dev) - queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1710,11 +1627,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, dsi_dev_detach(msm_host->pdev); - msm_host->device_node = NULL; - DBG("id=%d", msm_host->id); - if (msm_host->dev) - 
queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1841,7 +1754,7 @@ static char bpg_offset[DSC_NUM_BUF_RANGES] = { 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 }; -static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc) +static int dsi_populate_dsc_params(struct drm_dsc_config *dsc) { int mux_words_size; int groups_per_line, groups_total; @@ -1854,98 +1767,98 @@ static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc) int final_value, final_scale; int i; - dsc->drm->rc_model_size = 8192; - dsc->drm->first_line_bpg_offset = 12; - dsc->drm->rc_edge_factor = 6; - dsc->drm->rc_tgt_offset_high = 3; - dsc->drm->rc_tgt_offset_low = 3; - dsc->drm->simple_422 = 0; - dsc->drm->convert_rgb = 1; - dsc->drm->vbr_enable = 0; + dsc->rc_model_size = 8192; + dsc->first_line_bpg_offset = 12; + dsc->rc_edge_factor = 6; + dsc->rc_tgt_offset_high = 3; + dsc->rc_tgt_offset_low = 3; + dsc->simple_422 = 0; + dsc->convert_rgb = 1; + dsc->vbr_enable = 0; /* handle only bpp = bpc = 8 */ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) - dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i]; + dsc->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i]; for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { - dsc->drm->rc_range_params[i].range_min_qp = min_qp[i]; - dsc->drm->rc_range_params[i].range_max_qp = max_qp[i]; - dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i]; + dsc->rc_range_params[i].range_min_qp = min_qp[i]; + dsc->rc_range_params[i].range_max_qp = max_qp[i]; + dsc->rc_range_params[i].range_bpg_offset = bpg_offset[i]; } - dsc->drm->initial_offset = 6144; /* Not bpp 12 */ - if (dsc->drm->bits_per_pixel != 8) - dsc->drm->initial_offset = 2048; /* bpp = 12 */ + dsc->initial_offset = 6144; /* Not bpp 12 */ + if (dsc->bits_per_pixel != 8) + dsc->initial_offset = 2048; /* bpp = 12 */ mux_words_size = 48; /* bpc == 8/10 */ - if (dsc->drm->bits_per_component == 12) + if (dsc->bits_per_component == 12) mux_words_size = 64; - dsc->drm->initial_xmit_delay = 512; - dsc->drm->initial_scale_value = 32; - dsc->drm->first_line_bpg_offset = 12; - dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1; + dsc->initial_xmit_delay = 512; + dsc->initial_scale_value = 32; + dsc->first_line_bpg_offset = 12; + dsc->line_buf_depth = dsc->bits_per_component + 1; /* bpc 8 */ - dsc->drm->flatness_min_qp = 3; - dsc->drm->flatness_max_qp = 12; - dsc->drm->rc_quant_incr_limit0 = 11; - dsc->drm->rc_quant_incr_limit1 = 11; - dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; + dsc->flatness_min_qp = 3; + dsc->flatness_max_qp = 12; + dsc->rc_quant_incr_limit0 = 11; + dsc->rc_quant_incr_limit1 = 11; + dsc->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; /* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of * params are calculated */ - groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3); - dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8; - if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8) - dsc->drm->slice_chunk_size++; + groups_per_line = DIV_ROUND_UP(dsc->slice_width, 3); + dsc->slice_chunk_size = dsc->slice_width * dsc->bits_per_pixel / 8; + if ((dsc->slice_width * dsc->bits_per_pixel) % 8) + dsc->slice_chunk_size++; /* rbs-min */ - min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset + - dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel + - groups_per_line * dsc->drm->first_line_bpg_offset; + min_rate_buffer_size = dsc->rc_model_size - dsc->initial_offset + + dsc->initial_xmit_delay * 
dsc->bits_per_pixel + + groups_per_line * dsc->first_line_bpg_offset; - hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel); + hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->bits_per_pixel); - dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay; + dsc->initial_dec_delay = hrd_delay - dsc->initial_xmit_delay; - dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size / - (dsc->drm->rc_model_size - dsc->drm->initial_offset); + dsc->initial_scale_value = 8 * dsc->rc_model_size / + (dsc->rc_model_size - dsc->initial_offset); - slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height; + slice_bits = 8 * dsc->slice_chunk_size * dsc->slice_height; - groups_total = groups_per_line * dsc->drm->slice_height; + groups_total = groups_per_line * dsc->slice_height; - data = dsc->drm->first_line_bpg_offset * 2048; + data = dsc->first_line_bpg_offset * 2048; - dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1)); + dsc->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->slice_height - 1)); - pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2); + pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->bits_per_component + 4) - 2); num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size - ((slice_bits - pre_num_extra_mux_bits) % mux_words_size)); - data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits); - dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total); + data = 2048 * (dsc->rc_model_size - dsc->initial_offset + num_extra_mux_bits); + dsc->slice_bpg_offset = DIV_ROUND_UP(data, groups_total); /* bpp * 16 + 0.5 */ - data = dsc->drm->bits_per_pixel * 16; + data = dsc->bits_per_pixel * 16; data *= 2; data++; data /= 2; target_bpp_x16 = data; - data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16; - final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits; - dsc->drm->final_offset = final_value; + data = (dsc->initial_xmit_delay * target_bpp_x16) / 16; + final_value = dsc->rc_model_size - data + num_extra_mux_bits; + dsc->final_offset = final_value; - final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value); + final_scale = 8 * dsc->rc_model_size / (dsc->rc_model_size - final_value); - data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset); - dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data; + data = (final_scale - 9) * (dsc->nfl_bpg_offset + dsc->slice_bpg_offset); + dsc->scale_increment_interval = (2048 * dsc->final_offset) / data; - dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8); + dsc->scale_decrement_interval = groups_per_line / (dsc->initial_scale_value - 8); return 0; } @@ -1954,7 +1867,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) { struct device *dev = &msm_host->pdev->dev; struct device_node *np = dev->of_node; - struct device_node *endpoint, *device_node; + struct device_node *endpoint; int ret = 0; /* @@ -1977,16 +1890,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) goto err; } - /* Get panel node from the output port's endpoint data */ - device_node = of_graph_get_remote_node(np, 1, 0); - if (!device_node) { - DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__); - ret = -ENODEV; - goto err; - } - - msm_host->device_node = device_node; - if (of_property_read_bool(np, "syscon-sfpb")) { msm_host->sfpb = syscon_regmap_lookup_by_phandle(np, "syscon-sfpb"); @@ 
-1997,8 +1900,6 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) } } - of_node_put(device_node); - err: of_node_put(endpoint); @@ -2028,6 +1929,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) { struct msm_dsi_host *msm_host = NULL; struct platform_device *pdev = msm_dsi->pdev; + const struct msm_dsi_config *cfg; int ret; msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); @@ -2060,6 +1962,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) pr_err("%s: get config failed\n", __func__); goto fail; } + cfg = msm_host->cfg_hnd->cfg; msm_host->id = dsi_host_get_id(msm_host); if (msm_host->id < 0) { @@ -2069,13 +1972,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) } /* fixup base address by io offset */ - msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; + msm_host->ctrl_base += cfg->io_offset; - ret = dsi_regulator_init(msm_host); - if (ret) { - pr_err("%s: regulator init failed\n", __func__); + ret = devm_regulator_bulk_get_const(&pdev->dev, cfg->num_regulators, + cfg->regulator_data, + &msm_host->supplies); + if (ret) goto fail; - } ret = dsi_clk_init(msm_host); if (ret) { @@ -2126,7 +2029,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* setup workqueue */ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); INIT_WORK(&msm_host->err_work, dsi_err_worker); - INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); msm_dsi->id = msm_host->id; @@ -2159,23 +2061,9 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; - struct drm_panel *panel; int ret; msm_host->dev = dev; - panel = msm_dsi_host_get_panel(&msm_host->base); - - if (!IS_ERR(panel) && panel->dsc) { - struct msm_display_dsc_config *dsc = msm_host->dsc; - - if (!dsc) { - dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL); - if (!dsc) - return -ENOMEM; - dsc->drm = panel->dsc; - msm_host->dsc = dsc; - } - } ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K); if (ret) { @@ -2556,7 +2444,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host, msm_dsi_sfpb_config(msm_host, true); - ret = dsi_host_regulator_enable(msm_host); + ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators, + msm_host->supplies); if (ret) { pr_err("%s:Failed to enable vregs.ret=%d\n", __func__, ret); @@ -2596,7 +2485,8 @@ fail_disable_clk: cfg_hnd->ops->link_clk_disable(msm_host); pm_runtime_put(&msm_host->pdev->dev); fail_disable_reg: - dsi_host_regulator_disable(msm_host); + regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators, + msm_host->supplies); unlock_ret: mutex_unlock(&msm_host->dev_mutex); return ret; @@ -2623,7 +2513,8 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) cfg_hnd->ops->link_clk_disable(msm_host); pm_runtime_put(&msm_host->pdev->dev); - dsi_host_regulator_disable(msm_host); + regulator_bulk_disable(msm_host->cfg_hnd->cfg->num_regulators, + msm_host->supplies); msm_dsi_sfpb_config(msm_host, false); @@ -2659,45 +2550,33 @@ enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host, const struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - struct msm_display_dsc_config *dsc = msm_host->dsc; + struct drm_dsc_config *dsc = msm_host->dsc; int pic_width = mode->hdisplay; int pic_height = mode->vdisplay; if (!msm_host->dsc) return MODE_OK; - if (pic_width % dsc->drm->slice_width) { + if (pic_width % dsc->slice_width) { pr_err("DSI: pic_width %d has to be multiple of slice 
%d\n", - pic_width, dsc->drm->slice_width); + pic_width, dsc->slice_width); return MODE_H_ILLEGAL; } - if (pic_height % dsc->drm->slice_height) { + if (pic_height % dsc->slice_height) { pr_err("DSI: pic_height %d has to be multiple of slice %d\n", - pic_height, dsc->drm->slice_height); + pic_height, dsc->slice_height); return MODE_V_ILLEGAL; } return MODE_OK; } -struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host) -{ - return of_drm_find_panel(to_msm_dsi_host(host)->device_node); -} - unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host) { return to_msm_dsi_host(host)->mode_flags; } -struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host) -{ - struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - - return of_drm_find_bridge(msm_host->device_node); -} - void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); @@ -2771,7 +2650,7 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host) DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER); } -struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host) +struct drm_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index cb84d185d73a..3a1417397283 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -141,14 +141,11 @@ static int enable_phy(struct msm_dsi *msm_dsi, struct msm_dsi_phy_shared_timings *shared_timings) { struct msm_dsi_phy_clk_request clk_req; - int ret; bool is_bonded_dsi = IS_BONDED_DSI(); msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_bonded_dsi); - ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings); - - return ret; + return msm_dsi_phy_enable(msm_dsi->phy, &clk_req, shared_timings); } static int @@ -214,39 +211,26 @@ static void dsi_mgr_phy_disable(int id) } } -struct dsi_connector { - struct drm_connector base; - int id; -}; - struct dsi_bridge { struct drm_bridge base; int id; }; -#define to_dsi_connector(x) container_of(x, struct dsi_connector, base) #define to_dsi_bridge(x) container_of(x, struct dsi_bridge, base) -static inline int dsi_mgr_connector_get_id(struct drm_connector *connector) -{ - struct dsi_connector *dsi_connector = to_dsi_connector(connector); - return dsi_connector->id; -} - static int dsi_mgr_bridge_get_id(struct drm_bridge *bridge) { struct dsi_bridge *dsi_bridge = to_dsi_bridge(bridge); return dsi_bridge->id; } -static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) +static void msm_dsi_manager_set_split_display(u8 id) { - struct msm_drm_private *priv = conn->dev->dev_private; - struct msm_kms *kms = priv->kms; struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); + struct msm_drm_private *priv = msm_dsi->dev->dev_private; + struct msm_kms *kms = priv->kms; struct msm_dsi *master_dsi, *slave_dsi; - struct drm_panel *panel; if (IS_BONDED_DSI() && !IS_MASTER_DSI_LINK(id)) { master_dsi = other_dsi; @@ -256,89 +240,18 @@ static int msm_dsi_manager_panel_init(struct drm_connector *conn, u8 id) slave_dsi = other_dsi; } - /* - * There is only 1 panel in the global panel list for bonded DSI mode. - * Therefore slave dsi should get the drm_panel instance from master - * dsi. 
- */ - panel = msm_dsi_host_get_panel(master_dsi->host); - if (IS_ERR(panel)) { - DRM_ERROR("Could not find panel for %u (%ld)\n", msm_dsi->id, - PTR_ERR(panel)); - return PTR_ERR(panel); - } - - if (!panel || !IS_BONDED_DSI()) - goto out; - - drm_object_attach_property(&conn->base, - conn->dev->mode_config.tile_property, 0); + if (!msm_dsi->external_bridge || !IS_BONDED_DSI()) + return; /* * Set split display info to kms once bonded DSI panel is connected to * both hosts. */ - if (other_dsi && other_dsi->panel && kms->funcs->set_split_display) { + if (other_dsi && other_dsi->external_bridge && kms->funcs->set_split_display) { kms->funcs->set_split_display(kms, master_dsi->encoder, slave_dsi->encoder, msm_dsi_is_cmd_mode(msm_dsi)); } - -out: - msm_dsi->panel = panel; - return 0; -} - -static enum drm_connector_status dsi_mgr_connector_detect( - struct drm_connector *connector, bool force) -{ - int id = dsi_mgr_connector_get_id(connector); - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - - return msm_dsi->panel ? connector_status_connected : - connector_status_disconnected; -} - -static void dsi_mgr_connector_destroy(struct drm_connector *connector) -{ - struct dsi_connector *dsi_connector = to_dsi_connector(connector); - - DBG(""); - - drm_connector_cleanup(connector); - - kfree(dsi_connector); -} - -static int dsi_mgr_connector_get_modes(struct drm_connector *connector) -{ - int id = dsi_mgr_connector_get_id(connector); - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct drm_panel *panel = msm_dsi->panel; - int num; - - if (!panel) - return 0; - - /* - * In bonded DSI mode, we have one connector that can be - * attached to the drm_panel. - */ - num = drm_panel_get_modes(panel, connector); - if (!num) - return 0; - - return num; -} - -static struct drm_encoder * -dsi_mgr_connector_best_encoder(struct drm_connector *connector) -{ - int id = dsi_mgr_connector_get_id(connector); - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - - DBG(""); - return msm_dsi_get_encoder(msm_dsi); } static void dsi_mgr_bridge_power_on(struct drm_bridge *bridge) @@ -403,7 +316,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; - struct drm_panel *panel = msm_dsi->panel; bool is_bonded_dsi = IS_BONDED_DSI(); int ret; @@ -418,18 +330,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) if (!dsi_mgr_power_on_early(bridge)) dsi_mgr_bridge_power_on(bridge); - /* Always call panel functions once, because even for dual panels, - * there is only one drm_panel instance. 
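
With msm_dsi_host_get_panel() and the host's device_node bookkeeping gone, the host never resolves a drm_panel itself. A panel now participates in two ways instead: its driver publishes the DSC configuration through the mipi_dsi_device (picked up by the "if (dsi->dsc)" assignment in the dsi_host_attach() hunk earlier), and the panel itself is reached as a panel-bridge through the DSI output port. A sketch of the panel-driver side under those assumptions (driver name and geometry are hypothetical):

	static int example_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct drm_dsc_config *dsc;

		dsc = devm_kzalloc(&dsi->dev, sizeof(*dsc), GFP_KERNEL);
		if (!dsc)
			return -ENOMEM;

		dsc->bits_per_component = 8;
		dsc->bits_per_pixel = 8;	/* integer bpp, as this host assumes */
		dsc->slice_width = 540;
		dsc->slice_height = 8;

		dsi->dsc = dsc;			/* consumed in dsi_host_attach() */
		return mipi_dsi_attach(dsi);
	}
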
- */ - if (panel) { - ret = drm_panel_prepare(panel); - if (ret) { - pr_err("%s: prepare panel %d failed, %d\n", __func__, - id, ret); - goto panel_prep_fail; - } - } - ret = msm_dsi_host_enable(host); if (ret) { pr_err("%s: enable host %d failed, %d\n", __func__, id, ret); @@ -449,9 +349,6 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) host1_en_fail: msm_dsi_host_disable(host); host_en_fail: - if (panel) - drm_panel_unprepare(panel); -panel_prep_fail: return; } @@ -469,62 +366,12 @@ void msm_dsi_manager_tpg_enable(void) } } -static void dsi_mgr_bridge_enable(struct drm_bridge *bridge) -{ - int id = dsi_mgr_bridge_get_id(bridge); - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct drm_panel *panel = msm_dsi->panel; - bool is_bonded_dsi = IS_BONDED_DSI(); - int ret; - - DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi)) - return; - - /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ - if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) - return; - - if (panel) { - ret = drm_panel_enable(panel); - if (ret) { - pr_err("%s: enable panel %d failed, %d\n", __func__, id, - ret); - } - } -} - -static void dsi_mgr_bridge_disable(struct drm_bridge *bridge) -{ - int id = dsi_mgr_bridge_get_id(bridge); - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct drm_panel *panel = msm_dsi->panel; - bool is_bonded_dsi = IS_BONDED_DSI(); - int ret; - - DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi)) - return; - - /* Do nothing with the host if it is slave-DSI in case of bonded DSI */ - if (is_bonded_dsi && !IS_MASTER_DSI_LINK(id)) - return; - - if (panel) { - ret = drm_panel_disable(panel); - if (ret) - pr_err("%s: Panel %d OFF failed, %d\n", __func__, id, - ret); - } -} - static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) { int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; - struct drm_panel *panel = msm_dsi->panel; bool is_bonded_dsi = IS_BONDED_DSI(); int ret; @@ -551,13 +398,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) pr_err("%s: host1 disable failed, %d\n", __func__, ret); } - if (panel) { - ret = drm_panel_unprepare(panel); - if (ret) - pr_err("%s: Panel %d unprepare failed,%d\n", __func__, - id, ret); - } - msm_dsi_host_disable_irq(host); if (is_bonded_dsi && msm_dsi1) msm_dsi_host_disable_irq(msm_dsi1->host); @@ -614,76 +454,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, return msm_dsi_host_check_dsc(host, mode); } -static const struct drm_connector_funcs dsi_mgr_connector_funcs = { - .detect = dsi_mgr_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = dsi_mgr_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs dsi_mgr_conn_helper_funcs = { - .get_modes = dsi_mgr_connector_get_modes, - .best_encoder = dsi_mgr_connector_best_encoder, -}; - static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = { .pre_enable = dsi_mgr_bridge_pre_enable, - .enable = dsi_mgr_bridge_enable, - .disable = dsi_mgr_bridge_disable, .post_disable = dsi_mgr_bridge_post_disable, .mode_set = dsi_mgr_bridge_mode_set, .mode_valid = dsi_mgr_bridge_mode_valid, }; -/* initialize connector when we're connected to 
a drm_panel */ -struct drm_connector *msm_dsi_manager_connector_init(u8 id) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct drm_connector *connector = NULL; - struct dsi_connector *dsi_connector; - int ret; - - dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); - if (!dsi_connector) - return ERR_PTR(-ENOMEM); - - dsi_connector->id = id; - - connector = &dsi_connector->base; - - ret = drm_connector_init(msm_dsi->dev, connector, - &dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI); - if (ret) - return ERR_PTR(ret); - - drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs); - - /* Enable HPD to let hpd event is handled - * when panel is attached to the host. - */ - connector->polled = DRM_CONNECTOR_POLL_HPD; - - /* Display driver doesn't support interlace now. */ - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_connector_attach_encoder(connector, msm_dsi->encoder); - - ret = msm_dsi_manager_panel_init(connector, id); - if (ret) { - DRM_DEV_ERROR(msm_dsi->dev->dev, "init panel failed %d\n", ret); - goto fail; - } - - return connector; - -fail: - connector->funcs->destroy(connector); - return ERR_PTR(ret); -} - /* initialize bridge */ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) { @@ -722,18 +499,21 @@ fail: return ERR_PTR(ret); } -struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) +int msm_dsi_manager_ext_bridge_init(u8 id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_device *dev = msm_dsi->dev; - struct drm_connector *connector; struct drm_encoder *encoder; struct drm_bridge *int_bridge, *ext_bridge; int ret; int_bridge = msm_dsi->bridge; - ext_bridge = msm_dsi->external_bridge = - msm_dsi_host_get_bridge(msm_dsi->host); + ext_bridge = devm_drm_of_get_bridge(&msm_dsi->pdev->dev, + msm_dsi->pdev->dev.of_node, 1, 0); + if (IS_ERR(ext_bridge)) + return PTR_ERR(ext_bridge); + + msm_dsi->external_bridge = ext_bridge; encoder = msm_dsi->encoder; @@ -745,36 +525,32 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret == -EINVAL) { - struct drm_connector *connector; - struct list_head *connector_list; - - /* link the internal dsi bridge to the external bridge */ - drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); - /* - * we need the drm_connector created by the external bridge - * driver (or someone else) to feed it to our driver's - * priv->connector[] list, mainly for msm_fbdev_init() + * link the internal dsi bridge to the external bridge, + * connector is created by the next bridge. */ - connector_list = &dev->mode_config.connector_list; + ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); + if (ret < 0) + return ret; + } else { + struct drm_connector *connector; - list_for_each_entry(connector, connector_list, head) { - if (drm_connector_has_possible_encoder(connector, encoder)) - return connector; + /* We are in charge of the connector, create one now. 
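
The two attach paths interleaved in the msm_dsi_manager_ext_bridge_init() hunk below are easier to follow reassembled in order; this is the same logic as the + lines, not new behavior. First try the connector-less attach, and only build a connector locally when the whole chain supports that mode:

	ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL) {
		/* a bridge in the chain insists on creating its own connector */
		ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
		if (ret < 0)
			return ret;
	} else {
		struct drm_connector *connector;

		/* chain accepted NO_CONNECTOR: create a bridge connector here */
		connector = drm_bridge_connector_init(dev, encoder);
		if (IS_ERR(connector))
			return PTR_ERR(connector);

		ret = drm_connector_attach_encoder(connector, encoder);
		if (ret < 0)
			return ret;
	}
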
*/ + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + return PTR_ERR(connector); } - return ERR_PTR(-ENODEV); - } - - connector = drm_bridge_connector_init(dev, encoder); - if (IS_ERR(connector)) { - DRM_ERROR("Unable to create bridge connector\n"); - return ERR_CAST(connector); + ret = drm_connector_attach_encoder(connector, encoder); + if (ret < 0) + return ret; } - drm_connector_attach_encoder(connector, encoder); + /* The pipeline is ready, ping encoders if necessary */ + msm_dsi_manager_set_split_display(id); - return connector; + return 0; } void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index a39de3bdc7fa..7fc0975cb869 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, } else { timing->shared_timings.clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); - timing->shared_timings.clk_pre_inc_by_2 = 0; + timing->shared_timings.clk_pre_inc_by_2 = 0; } timing->ta_go = 3; @@ -507,82 +507,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing, return 0; } -static int dsi_phy_regulator_init(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - struct device *dev = &phy->pdev->dev; - int num = phy->cfg->reg_cfg.num; - int i, ret; - - for (i = 0; i < num; i++) - s[i].supply = regs[i].name; - - ret = devm_regulator_bulk_get(dev, num, s); - if (ret < 0) { - if (ret != -EPROBE_DEFER) { - DRM_DEV_ERROR(dev, - "%s: failed to init regulator, ret=%d\n", - __func__, ret); - } - - return ret; - } - - return 0; -} - -static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - int num = phy->cfg->reg_cfg.num; - int i; - - DBG(""); - for (i = num - 1; i >= 0; i--) - if (regs[i].disable_load >= 0) - regulator_set_load(s[i].consumer, regs[i].disable_load); - - regulator_bulk_disable(num, s); -} - -static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy) -{ - struct regulator_bulk_data *s = phy->supplies; - const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs; - struct device *dev = &phy->pdev->dev; - int num = phy->cfg->reg_cfg.num; - int ret, i; - - DBG(""); - for (i = 0; i < num; i++) { - if (regs[i].enable_load >= 0) { - ret = regulator_set_load(s[i].consumer, - regs[i].enable_load); - if (ret < 0) { - DRM_DEV_ERROR(dev, - "regulator %d set op mode failed, %d\n", - i, ret); - goto fail; - } - } - } - - ret = regulator_bulk_enable(num, s); - if (ret < 0) { - DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret); - goto fail; - } - - return 0; - -fail: - for (i--; i >= 0; i--) - regulator_set_load(s[i].consumer, regs[i].disable_load); - return ret; -} - static int dsi_phy_enable_resource(struct msm_dsi_phy *phy) { struct device *dev = &phy->pdev->dev; @@ -697,12 +621,9 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->pdev = pdev; phy->id = dsi_phy_get_id(phy); - if (phy->id < 0) { - ret = phy->id; - DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n", - __func__, ret); - goto fail; - } + if (phy->id < 0) + return dev_err_probe(dev, phy->id, + "Couldn't identify PHY index\n"); phy->regulator_ldo_mode = 
of_property_read_bool(dev->of_node, "qcom,dsi-phy-regulator-ldo-mode"); @@ -710,86 +631,71 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) phy->cphy_mode = (phy_type == PHY_TYPE_CPHY); phy->base = msm_ioremap_size(pdev, "dsi_phy", &phy->base_size); - if (IS_ERR(phy->base)) { - DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__); - ret = -ENOMEM; - goto fail; - } + if (IS_ERR(phy->base)) + return dev_err_probe(dev, PTR_ERR(phy->base), + "Failed to map phy base\n"); phy->pll_base = msm_ioremap_size(pdev, "dsi_pll", &phy->pll_size); - if (IS_ERR(phy->pll_base)) { - DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__); - ret = -ENOMEM; - goto fail; - } + if (IS_ERR(phy->pll_base)) + return dev_err_probe(dev, PTR_ERR(phy->pll_base), + "Failed to map pll base\n"); if (phy->cfg->has_phy_lane) { phy->lane_base = msm_ioremap_size(pdev, "dsi_phy_lane", &phy->lane_size); - if (IS_ERR(phy->lane_base)) { - DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__); - ret = -ENOMEM; - goto fail; - } + if (IS_ERR(phy->lane_base)) + return dev_err_probe(dev, PTR_ERR(phy->lane_base), + "Failed to map phy lane base\n"); } if (phy->cfg->has_phy_regulator) { phy->reg_base = msm_ioremap_size(pdev, "dsi_phy_regulator", &phy->reg_size); - if (IS_ERR(phy->reg_base)) { - DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__); - ret = -ENOMEM; - goto fail; - } + if (IS_ERR(phy->reg_base)) + return dev_err_probe(dev, PTR_ERR(phy->reg_base), + "Failed to map phy regulator base\n"); } if (phy->cfg->ops.parse_dt_properties) { ret = phy->cfg->ops.parse_dt_properties(phy); if (ret) - goto fail; + return ret; } - ret = dsi_phy_regulator_init(phy); + ret = devm_regulator_bulk_get_const(dev, phy->cfg->num_regulators, + phy->cfg->regulator_data, + &phy->supplies); if (ret) - goto fail; + return ret; phy->ahb_clk = msm_clk_get(pdev, "iface"); - if (IS_ERR(phy->ahb_clk)) { - DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__); - ret = PTR_ERR(phy->ahb_clk); - goto fail; - } + if (IS_ERR(phy->ahb_clk)) + return dev_err_probe(dev, PTR_ERR(phy->ahb_clk), + "Unable to get ahb clk\n"); /* PLL init will call into clk_register which requires * register access, so we need to enable power and ahb clock. 
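
Note the single pattern running through the dsi_cfg.c, dsi_host.c, and dsi_phy.c hunks: the per-SoC {name, enable_load, disable_load} tables and the hand-rolled regulator_set_load()/regulator_bulk_enable() loops collapse into const regulator_bulk_data arrays carrying init_load_uA, fetched once via devm_regulator_bulk_get_const(). The regulator core applies the initial load at get time, so the callers shrink to plain bulk enable/disable. Minimal usage sketch (array name and values illustrative):

	static const struct regulator_bulk_data example_regulators[] = {
		{ .supply = "vdda", .init_load_uA = 21800 },	/* 1.2 V */
	};

	struct regulator_bulk_data *supplies;
	int ret;

	ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(example_regulators),
					    example_regulators, &supplies);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(example_regulators), supplies);
	if (ret)
		return ret;

	/* ... on power off ... */
	regulator_bulk_disable(ARRAY_SIZE(example_regulators), supplies);

One behavioral delta worth noting: the old code re-applied disable_load around each disable; with init_load_uA the load is set once, which matches how these supplies were actually used, since disable_load was a token value (at most 100 uA) or -1 everywhere in the removed tables.
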
/* PLL init will call into clk_register which requires * register access, so we need to enable power and ahb clock. */ ret = dsi_phy_enable_resource(phy); if (ret) - goto fail; + return ret; if (phy->cfg->ops.pll_init) { ret = phy->cfg->ops.pll_init(phy); - if (ret) { - DRM_DEV_INFO(dev, - "%s: pll init failed: %d, need separate pll clk driver\n", - __func__, ret); - goto fail; - } + if (ret) + return dev_err_probe(dev, ret, + "PLL init failed; need separate clk driver\n"); } ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, phy->provided_clocks); - if (ret) { - DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret); - goto fail; - } + if (ret) + return dev_err_probe(dev, ret, + "Failed to register clk provider\n"); dsi_phy_disable_resource(phy); platform_set_drvdata(pdev, phy); return 0; - -fail: - return ret; } static struct platform_driver dsi_phy_platform_driver = { @@ -829,7 +735,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, goto res_en_fail; } - ret = dsi_phy_regulator_enable(phy); + ret = regulator_bulk_enable(phy->cfg->num_regulators, phy->supplies); if (ret) { DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n", __func__, ret); @@ -866,7 +772,7 @@ pll_restor_fail: if (phy->cfg->ops.disable) phy->cfg->ops.disable(phy); phy_en_fail: - dsi_phy_regulator_disable(phy); + regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies); reg_en_fail: dsi_phy_disable_resource(phy); res_en_fail: @@ -880,7 +786,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) phy->cfg->ops.disable(phy); - dsi_phy_regulator_disable(phy); + regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies); dsi_phy_disable_resource(phy); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index dc91b43d5a38..60a99c6525b2 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -29,7 +29,8 @@ struct msm_dsi_phy_ops { }; struct msm_dsi_phy_cfg { - struct dsi_reg_config reg_cfg; + const struct regulator_bulk_data *regulator_data; + int num_regulators; struct msm_dsi_phy_ops ops; unsigned long min_pll_rate; @@ -98,7 +99,7 @@ struct msm_dsi_phy { int id; struct clk *ahb_clk; - struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; + struct regulator_bulk_data *supplies; struct msm_dsi_dphy_timing timing; const struct msm_dsi_phy_cfg *cfg;
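Regulator handling is now table-driven: each SoC config points at a static const regulator_bulk_data array whose init_load_uA replaces the old {name, enable_load, disable_load} triples, devm_regulator_bulk_get_const() hands back a matching consumer array, and power on/off collapses to regulator_bulk_enable()/regulator_bulk_disable(). A sketch of the pattern with made-up supply names and loads:

#include <linux/regulator/consumer.h>

/* Per-SoC supply table; names and loads here are illustrative */
static const struct regulator_bulk_data foo_phy_regulators[] = {
	{ .supply = "vdda", .init_load_uA = 21800 },
	{ .supply = "vddio", .init_load_uA = 100000 },
};

struct foo_phy {
	struct device *dev;
	struct regulator_bulk_data *supplies;	/* filled in at get time */
};

static int foo_phy_get_supplies(struct foo_phy *phy)
{
	/* The const table is shared; only the consumer copy is per-device */
	return devm_regulator_bulk_get_const(phy->dev,
					     ARRAY_SIZE(foo_phy_regulators),
					     foo_phy_regulators,
					     &phy->supplies);
}

static int foo_phy_power_on(struct foo_phy *phy)
{
	/* init_load_uA was applied at get time; no set_load() loop needed */
	return regulator_bulk_enable(ARRAY_SIZE(foo_phy_regulators),
				     phy->supplies);
}

static void foo_phy_power_off(struct foo_phy *phy)
{
	regulator_bulk_disable(ARRAY_SIZE(foo_phy_regulators), phy->supplies);
}

Note that the disable_load half of the old tuples simply disappears: the load is set once when the supplies are acquired instead of being rewritten on every enable/disable cycle.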
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c index 08b015ea1b1e..27b592c776a3 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -188,19 +188,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config * pr_debug("SSC is enabled\n"); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1, - config->ssc_stepsize & 0xff); + config->ssc_stepsize & 0xff); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1, - config->ssc_stepsize >> 8); + config->ssc_stepsize >> 8); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1, - config->ssc_div_per & 0xff); + config->ssc_div_per & 0xff); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1, - config->ssc_div_per >> 8); + config->ssc_div_per >> 8); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1, - config->ssc_adj_per & 0xff); + config->ssc_adj_per & 0xff); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1, - config->ssc_adj_per >> 8); + config->ssc_adj_per >> 8); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL, - SSC_EN | (config->ssc_center ? SSC_CENTER : 0)); + SSC_EN | (config->ssc_center ? SSC_CENTER : 0)); } } @@ -215,16 +215,19 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll) dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, - 0xba); - dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c); + 0xba); + dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, + 0x0c); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00); - dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08); + dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, + 0x08); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0); - dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa); + dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, + 0xfa); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, - 0x4c); + 0x4c); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f); @@ -236,18 +239,18 @@ static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *conf dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1, - config->decimal_div_start); + config->decimal_div_start); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1, - config->frac_div_start & 0xff); + config->frac_div_start & 0xff); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1, - (config->frac_div_start & 0xff00) >> 8); + (config->frac_div_start & 0xff00) >> 8); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1, - (config->frac_div_start & 0x30000) >> 16); + (config->frac_div_start & 0x30000) >> 16); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10); dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS, - config->pll_clock_inverters); + config->pll_clock_inverters); } static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, @@ -306,7 +309,7 @@ static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll) dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0); dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0, - data & ~BIT(5)); + data & ~BIT(5)); ndelay(250); } @@ -315,7 +318,7 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll) u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0); dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0, - data | BIT(5)); + data | BIT(5)); dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0); ndelay(250); } @@ -326,7 +329,7 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll) data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, - data & ~BIT(5)); + data & ~BIT(5)); } static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll) @@ -335,7 +338,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll) data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); 
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, - data | BIT(5)); + data | BIT(5)); } static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) @@ -356,7 +359,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) /* Start PLL */ dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, - 0x01); + 0x01); /* * ensure all PLL configurations are written prior to checking @@ -378,10 +381,10 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) dsi_pll_enable_global_clk(pll_10nm->slave); dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, - 0x01); + 0x01); if (pll_10nm->slave) dsi_phy_write(pll_10nm->slave->phy->base + - REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01); + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01); error: return rc; @@ -486,7 +489,7 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy) u32 cmn_clk_cfg0, cmn_clk_cfg1; cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base + - REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); cached->pll_out_div &= 0x3; cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0); @@ -515,7 +518,7 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy) dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val); dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, - cached->bit_clk_div | (cached->pix_clk_div << 4)); + cached->bit_clk_div | (cached->pix_clk_div << 4)); val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); val &= ~0x3; @@ -571,64 +574,59 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy) */ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks) { - char clk_name[32], parent[32], vco_name[32]; - char parent2[32], parent3[32], parent4[32]; + char clk_name[32]; struct clk_init_data vco_init = { .parent_data = &(const struct clk_parent_data) { .fw_name = "ref", }, .num_parents = 1, - .name = vco_name, + .name = clk_name, .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_10nm_vco, }; struct device *dev = &pll_10nm->phy->pdev->dev; - struct clk_hw *hw; + struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit; + struct clk_hw *pll_post_out_div, *pclk_mux; int ret; DBG("DSI%d", pll_10nm->phy->id); - snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_10nm->phy->id); pll_10nm->clk_hw.init = &vco_init; ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw); if (ret) return ret; - snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_10nm->phy->id); - hw = devm_clk_hw_register_divider(dev, clk_name, - parent, CLK_SET_RATE_PARENT, - pll_10nm->phy->pll_base + - REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, - 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_10nm->clk_hw, CLK_SET_RATE_PARENT, + pll_10nm->phy->pll_base + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(pll_out_div)) { + ret = PTR_ERR(pll_out_div); goto fail; } - snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_10nm->phy->id); /* BIT CLK: DIV_CTRL_3_0 */ - hw = devm_clk_hw_register_divider(dev, clk_name, parent, - 
CLK_SET_RATE_PARENT, - pll_10nm->phy->base + - REG_DSI_10nm_PHY_CMN_CLK_CFG0, - 0, 4, CLK_DIVIDER_ONE_BASED, - &pll_10nm->postdiv_lock); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + pll_out_div, CLK_SET_RATE_PARENT, + pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock); + if (IS_ERR(pll_bit)) { + ret = PTR_ERR(pll_bit); goto fail; } - snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id); /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - CLK_SET_RATE_PARENT, 1, 8); + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + pll_bit, CLK_SET_RATE_PARENT, 1, 8); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -636,52 +634,45 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **prov provided_clocks[DSI_BYTE_PLL_CLK] = hw; - snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - 0, 1, 2); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_bit, 0, 1, 2); + if (IS_ERR(pll_by_2_bit)) { + ret = PTR_ERR(pll_by_2_bit); goto fail; } - snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - 0, 1, 4); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_out_div, 0, 1, 4); + if (IS_ERR(pll_post_out_div)) { + ret = PTR_ERR(pll_post_out_div); goto fail; } - snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id); - snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id); - snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id); - snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id); - - hw = devm_clk_hw_register_mux(dev, clk_name, - ((const char *[]){ - parent, parent2, parent3, parent4 - }), 4, 0, pll_10nm->phy->base + - REG_DSI_10nm_PHY_CMN_CLK_CFG1, - 0, 2, 0, NULL); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_10nm->phy->id); + + pclk_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + pll_bit, + pll_by_2_bit, + pll_out_div, + pll_post_out_div, + }), 4, 0, pll_10nm->phy->base + + REG_DSI_10nm_PHY_CMN_CLK_CFG1, 0, 2, 0, NULL); + if (IS_ERR(pclk_mux)) { + ret = PTR_ERR(pclk_mux); goto fail; } - snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id); - snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id); /* PIX CLK DIV : DIV_CTRL_7_4*/ - hw = devm_clk_hw_register_divider(dev, clk_name, parent, - 0, pll_10nm->phy->base + - REG_DSI_10nm_PHY_CMN_CLK_CFG0, - 4, 4, CLK_DIVIDER_ONE_BASED, - 
&pll_10nm->postdiv_lock); + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, pclk_mux, + 0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, &pll_10nm->postdiv_lock); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -1028,14 +1019,14 @@ static int dsi_10nm_phy_parse_dt(struct msm_dsi_phy *phy) return 0; } +static const struct regulator_bulk_data dsi_phy_10nm_regulators[] = { + { .supply = "vdds", .init_load_uA = 36000 }, +}; + const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vdds", 36000, 32}, - }, - }, + .regulator_data = dsi_phy_10nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators), .ops = { .enable = dsi_10nm_phy_enable, .disable = dsi_10nm_phy_disable, @@ -1052,12 +1043,8 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vdds", 36000, 32}, - }, - }, + .regulator_data = dsi_phy_10nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_10nm_regulators), .ops = { .enable = dsi_10nm_phy_enable, .disable = dsi_10nm_phy_disable, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index 8199c53567f4..0f8f4ca46429 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -711,7 +711,7 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy) cached_state->vco_rate, 0); if (ret) { DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, - "restore vco rate failed. ret=%d\n", ret); + "restore vco rate failed. ret=%d\n", ret); return ret; } @@ -764,14 +764,14 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy) static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, const char *name, - const char *parent_name, + const struct clk_hw *parent_hw, unsigned long flags, u8 shift) { struct dsi_pll_14nm_postdiv *pll_postdiv; struct device *dev = &pll_14nm->phy->pdev->dev; struct clk_init_data postdiv_init = { - .parent_names = (const char *[]) { parent_name }, + .parent_hws = (const struct clk_hw *[]) { parent_hw }, .num_parents = 1, .name = name, .flags = flags, @@ -800,72 +800,70 @@ static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks) { - char clk_name[32], parent[32], vco_name[32]; + char clk_name[32]; struct clk_init_data vco_init = { .parent_data = &(const struct clk_parent_data) { .fw_name = "ref", }, .num_parents = 1, - .name = vco_name, + .name = clk_name, .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_14nm_vco, }; struct device *dev = &pll_14nm->phy->pdev->dev; - struct clk_hw *hw; + struct clk_hw *hw, *n1_postdiv, *n1_postdivby2; int ret; DBG("DSI%d", pll_14nm->phy->id); - snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_14nm->phy->id); pll_14nm->clk_hw.init = &vco_init; ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw); if (ret) return ret; - snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id); - snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdiv_clk", pll_14nm->phy->id); /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ - hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, - CLK_SET_RATE_PARENT, 0); - if (IS_ERR(hw)) - return PTR_ERR(hw); + 
n1_postdiv = pll_14nm_postdiv_register(pll_14nm, clk_name, + &pll_14nm->clk_hw, CLK_SET_RATE_PARENT, 0); + if (IS_ERR(n1_postdiv)) + return PTR_ERR(n1_postdiv); - snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id); - snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_14nm->phy->id); /* DSI Byte clock = VCO_CLK / N1 / 8 */ - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - CLK_SET_RATE_PARENT, 1, 8); + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + n1_postdiv, CLK_SET_RATE_PARENT, 1, 8); if (IS_ERR(hw)) return PTR_ERR(hw); provided_clocks[DSI_BYTE_PLL_CLK] = hw; - snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id); - snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dn1_postdivby2_clk", pll_14nm->phy->id); /* * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider * on the way. Don't let it set parent. */ - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2); - if (IS_ERR(hw)) - return PTR_ERR(hw); + n1_postdivby2 = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, n1_postdiv, 0, 1, 2); + if (IS_ERR(n1_postdivby2)) + return PTR_ERR(n1_postdivby2); - snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id); - snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_14nm->phy->id); /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 * This is the output of N2 post-divider, bits 4-7 in * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. */ - hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4); + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, n1_postdivby2, + 0, 4); if (IS_ERR(hw)) return PTR_ERR(hw); - provided_clocks[DSI_PIXEL_PLL_CLK] = hw; + provided_clocks[DSI_PIXEL_PLL_CLK] = hw; return 0; } @@ -952,7 +950,8 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { DRM_DEV_ERROR(&phy->pdev->dev, - "%s: D-PHY timing calculation failed\n", __func__); + "%s: D-PHY timing calculation failed\n", + __func__); return -EINVAL; } @@ -1005,7 +1004,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, ret = dsi_14nm_set_usecase(phy); if (ret) { DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", - __func__, ret); + __func__, ret); return ret; } @@ -1024,14 +1023,18 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) wmb(); } +static const struct regulator_bulk_data dsi_phy_14nm_17mA_regulators[] = { + { .supply = "vcca", .init_load_uA = 17000 }, +}; + +static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = { + { .supply = "vcca", .init_load_uA = 73400 }, +}; + const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vcca", 17000, 32}, - }, - }, + .regulator_data = dsi_phy_14nm_17mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators), .ops = { .enable = dsi_14nm_phy_enable, .disable = dsi_14nm_phy_disable, @@ -1047,12 +1050,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vcca", 73400, 32}, - }, - }, + .regulator_data = dsi_phy_14nm_73p4mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_73p4mA_regulators), .ops = { .enable = dsi_14nm_phy_enable, .disable = dsi_14nm_phy_disable, @@ -1068,12 
+1067,8 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vcca", 17000, 32}, - }, - }, + .regulator_data = dsi_phy_14nm_17mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_14nm_17mA_regulators), .ops = { .enable = dsi_14nm_phy_enable, .disable = dsi_14nm_phy_disable, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index ee7c418a1c29..c9752b991744 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -129,15 +129,15 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy) dsi_20nm_phy_regulator_ctrl(phy, false); } +static const struct regulator_bulk_data dsi_phy_20nm_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ + { .supply = "vcca", .init_load_uA = 10000 }, /* 1.0 V */ +}; + const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .has_phy_regulator = true, - .reg_cfg = { - .num = 2, - .regs = { - {"vddio", 100000, 100}, /* 1.8 V */ - {"vcca", 10000, 100}, /* 1.0 V */ - }, - }, + .regulator_data = dsi_phy_20nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_20nm_regulators), .ops = { .enable = dsi_20nm_phy_enable, .disable = dsi_20nm_phy_disable, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 48eab80b548e..4c1bf55c5f38 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -104,7 +104,7 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm) * reset bit off and back on. */ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, - DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1); + DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1); dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1); } @@ -201,9 +201,9 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate, dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2, - DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2)); + DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2)); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3, - DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3)); + DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3)); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00); /* Add hardware recommended delay for correct PLL configuration */ @@ -316,12 +316,12 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm) for (i = 0; i < 2; i++) { /* DSI Uniphy lock detect setting */ dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, - 0x0c, 100); + 0x0c, 100); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d); /* poll for PLL ready status */ - locked = pll_28nm_poll_for_ready(pll_28nm, - max_reads, timeout_us); + locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, + timeout_us); if (locked) break; @@ -508,28 +508,28 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy) } dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, - cached_state->postdiv3); + cached_state->postdiv3); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, - cached_state->postdiv1); + cached_state->postdiv1); dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG, - cached_state->byte_mux); + cached_state->byte_mux); return 0; } static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks) { - char 
clk_name[32], parent1[32], parent2[32], vco_name[32]; + char clk_name[32]; struct clk_init_data vco_init = { .parent_data = &(const struct clk_parent_data) { .fw_name = "ref", .name = "xo", }, .num_parents = 1, - .name = vco_name, + .name = clk_name, .flags = CLK_IGNORE_UNUSED, }; struct device *dev = &pll_28nm->phy->pdev->dev; - struct clk_hw *hw; + struct clk_hw *hw, *analog_postdiv, *indirect_path_div2, *byte_mux; int ret; DBG("%d", pll_28nm->phy->id); @@ -539,55 +539,49 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov else vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm; - snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id); pll_28nm->clk_hw.init = &vco_init; ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw); if (ret) return ret; - snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id); - snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id); - hw = devm_clk_hw_register_divider(dev, clk_name, - parent1, CLK_SET_RATE_PARENT, + snprintf(clk_name, sizeof(clk_name), "dsi%danalog_postdiv_clk", pll_28nm->phy->id); + analog_postdiv = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base + - REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG, 0, 4, 0, NULL); - if (IS_ERR(hw)) - return PTR_ERR(hw); - - snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id); - snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, - parent1, CLK_SET_RATE_PARENT, - 1, 2); - if (IS_ERR(hw)) - return PTR_ERR(hw); - - snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id); - snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id); - hw = devm_clk_hw_register_divider(dev, clk_name, - parent1, 0, pll_28nm->phy->pll_base + + if (IS_ERR(analog_postdiv)) + return PTR_ERR(analog_postdiv); + + snprintf(clk_name, sizeof(clk_name), "dsi%dindirect_path_div2_clk", pll_28nm->phy->id); + indirect_path_div2 = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, analog_postdiv, CLK_SET_RATE_PARENT, 1, 2); + if (IS_ERR(indirect_path_div2)) + return PTR_ERR(indirect_path_div2); + + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id); + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG, - 0, 8, 0, NULL); + 0, 8, 0, NULL); if (IS_ERR(hw)) return PTR_ERR(hw); provided_clocks[DSI_PIXEL_PLL_CLK] = hw; - snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id); - snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id); - snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id); - hw = devm_clk_hw_register_mux(dev, clk_name, - ((const char *[]){ - parent1, parent2 + snprintf(clk_name, sizeof(clk_name), "dsi%dbyte_mux", pll_28nm->phy->id); + byte_mux = devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + &pll_28nm->clk_hw, + indirect_path_div2, }), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base + - REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL); - if (IS_ERR(hw)) - return PTR_ERR(hw); + REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL); + if (IS_ERR(byte_mux)) + return PTR_ERR(byte_mux); - snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id); - snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, - parent1, 
CLK_SET_RATE_PARENT, 1, 4); + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id); + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + byte_mux, CLK_SET_RATE_PARENT, 1, 4); if (IS_ERR(hw)) return PTR_ERR(hw); provided_clocks[DSI_BYTE_PLL_CLK] = hw; @@ -627,31 +621,31 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, void __iomem *base = phy->base; dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0, - DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1, - DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2, - DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); if (timing->clk_zero & BIT(8)) dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3, - DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); + DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4, - DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5, - DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6, - DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7, - DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8, - DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9, - DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | - DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10, - DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11, - DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); + DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); } static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy) @@ -713,7 +707,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, if (msm_dsi_dphy_timing_calc(timing, clk_req)) { DRM_DEV_ERROR(&phy->pdev->dev, - "%s: D-PHY timing calculation failed\n", __func__); + "%s: D-PHY timing calculation failed\n", + __func__); return -EINVAL; } @@ -769,14 +764,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) wmb(); } +static const struct regulator_bulk_data dsi_phy_28nm_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, +}; + const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .has_phy_regulator = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 100000, 100}, - }, - }, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, @@ -792,12 +787,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { 
.has_phy_regulator = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 100000, 100}, - }, - }, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, @@ -813,12 +804,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .has_phy_regulator = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = dsi_phy_28nm_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators), .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index fc56cdcc9ad6..26c08047e20c 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -104,29 +104,29 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate, fb_divider = (temp * VCO_PREF_DIV_RATIO) / val; fb_divider = fb_divider / 2 - 1; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1, - fb_divider & 0xff); + fb_divider & 0xff); val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2); val |= (fb_divider >> 8) & 0x07; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2, - val); + val); val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3); val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3, - val); + val); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6, - 0xf); + 0xf); val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8); val |= 0x7 << 4; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, - val); + val); return 0; } @@ -206,7 +206,7 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw) /* enable the PLL */ dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, - DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE); + DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE); locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us); @@ -367,23 +367,23 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy) cached_state->vco_rate, 0); if (ret) { DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev, - "restore vco rate failed. ret=%d\n", ret); + "restore vco rate failed. 
ret=%d\n", ret); return ret; } dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10, - cached_state->postdiv3); + cached_state->postdiv3); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9, - cached_state->postdiv2); + cached_state->postdiv2); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, - cached_state->postdiv1); + cached_state->postdiv1); return 0; } static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks) { - char *clk_name, *parent_name, *vco_name; + char clk_name[32]; struct clk_init_data vco_init = { .parent_data = &(const struct clk_parent_data) { .fw_name = "ref", @@ -404,20 +404,8 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov if (!bytediv) return -ENOMEM; - vco_name = devm_kzalloc(dev, 32, GFP_KERNEL); - if (!vco_name) - return -ENOMEM; - - parent_name = devm_kzalloc(dev, 32, GFP_KERNEL); - if (!parent_name) - return -ENOMEM; - - clk_name = devm_kzalloc(dev, 32, GFP_KERNEL); - if (!clk_name) - return -ENOMEM; - - snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id); - vco_init.name = vco_name; + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_28nm->phy->id); + vco_init.name = clk_name; pll_28nm->clk_hw.init = &vco_init; @@ -429,13 +417,14 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov bytediv->hw.init = &bytediv_init; bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9; - snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id); - snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1); + snprintf(clk_name, sizeof(clk_name), "dsi%dpllbyte", pll_28nm->phy->id + 1); bytediv_init.name = clk_name; bytediv_init.ops = &clk_bytediv_ops; bytediv_init.flags = CLK_SET_RATE_PARENT; - bytediv_init.parent_names = (const char * const *) &parent_name; + bytediv_init.parent_hws = (const struct clk_hw*[]){ + &pll_28nm->clk_hw, + }; bytediv_init.num_parents = 1; /* DIV2 */ @@ -444,12 +433,12 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov return ret; provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw; - snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1); + snprintf(clk_name, sizeof(clk_name), "dsi%dpll", pll_28nm->phy->id + 1); /* DIV3 */ - hw = devm_clk_hw_register_divider(dev, clk_name, - parent_name, 0, pll_28nm->phy->pll_base + + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_28nm->clk_hw, 0, pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10, - 0, 8, 0, NULL); + 0, 8, 0, NULL); if (IS_ERR(hw)) return PTR_ERR(hw); provided_clocks[DSI_PIXEL_PLL_CLK] = hw; @@ -489,29 +478,29 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy, void __iomem *base = phy->base; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0, - DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); + DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1, - DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); + DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2, - DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); + DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4, - DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + 
DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5, - DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); + DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6, - DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); + DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7, - DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); + DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8, - DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); + DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9, - DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | - DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10, - DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); + DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get)); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11, - DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); + DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0)); } static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy) @@ -523,7 +512,7 @@ static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy) dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, - 0x100); + 0x100); } static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy) @@ -544,7 +533,7 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy) int i = 5000; dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG, - 0x3); + 0x3); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a); @@ -577,11 +566,11 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy) dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i), - 0x00); + 0x00); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i), - 0x01); + 0x01); dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i), - 0x66); + 0x66); } dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40); @@ -602,7 +591,8 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, if (msm_dsi_dphy_timing_calc(timing, clk_req)) { DRM_DEV_ERROR(&phy->pdev->dev, - "%s: D-PHY timing calculation failed\n", __func__); + "%s: D-PHY timing calculation failed\n", + __func__); return -EINVAL; } @@ -648,14 +638,14 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy) wmb(); } +static const struct regulator_bulk_data dsi_phy_28nm_8960_regulators[] = { + { .supply = "vddio", .init_load_uA = 100000 }, /* 1.8 V */ +}; + const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { .has_phy_regulator = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vddio", 100000, 100}, /* 1.8 V */ - }, - }, + .regulator_data = dsi_phy_28nm_8960_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_28nm_8960_regulators), .ops = { .enable = dsi_28nm_phy_enable, .disable = 
dsi_28nm_phy_disable, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 66ed1919a1db..9e7fa7d88ead 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -176,19 +176,19 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c pr_debug("SSC is enabled\n"); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1, - config->ssc_stepsize & 0xff); + config->ssc_stepsize & 0xff); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1, - config->ssc_stepsize >> 8); + config->ssc_stepsize >> 8); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1, - config->ssc_div_per & 0xff); + config->ssc_div_per & 0xff); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1, - config->ssc_div_per >> 8); + config->ssc_div_per >> 8); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1, - config->ssc_adj_per & 0xff); + config->ssc_adj_per & 0xff); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1, - config->ssc_adj_per >> 8); + config->ssc_adj_per >> 8); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL, - SSC_EN | (config->ssc_center ? SSC_CENTER : 0)); + SSC_EN | (config->ssc_center ? SSC_CENTER : 0)); } } @@ -208,7 +208,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll) } dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1, - analog_controls_five_1); + analog_controls_five_1); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03); @@ -245,17 +245,20 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi void __iomem *base = pll->phy->pll_base; dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12); - dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, config->decimal_div_start); + dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, + config->decimal_div_start); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, - config->frac_div_start & 0xff); + config->frac_div_start & 0xff); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, - (config->frac_div_start & 0xff00) >> 8); + (config->frac_div_start & 0xff00) >> 8); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, - (config->frac_div_start & 0x30000) >> 16); + (config->frac_div_start & 0x30000) >> 16); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); - dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, pll->phy->cphy_mode ? 0x00 : 0x10); - dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters); + dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, + pll->phy->cphy_mode ? 
0x00 : 0x10); + dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, + config->pll_clock_inverters); } static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, @@ -341,7 +344,7 @@ static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, - data | BIT(5) | BIT(4)); + data | BIT(5) | BIT(4)); } static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) @@ -500,7 +503,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy) u32 cmn_clk_cfg0, cmn_clk_cfg1; cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base + - REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); cached->pll_out_div &= 0x3; cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); @@ -529,7 +532,7 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val); dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, - cached->bit_clk_div | (cached->pix_clk_div << 4)); + cached->bit_clk_div | (cached->pix_clk_div << 4)); val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); val &= ~0x3; @@ -585,65 +588,60 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) */ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks) { - char clk_name[32], parent[32], vco_name[32]; - char parent2[32]; + char clk_name[32]; struct clk_init_data vco_init = { .parent_data = &(const struct clk_parent_data) { .fw_name = "ref", }, .num_parents = 1, - .name = vco_name, + .name = clk_name, .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_7nm_vco, }; struct device *dev = &pll_7nm->phy->pdev->dev; - struct clk_hw *hw; + struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit; + struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent; int ret; DBG("DSI%d", pll_7nm->phy->id); - snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%dvco_clk", pll_7nm->phy->id); pll_7nm->clk_hw.init = &vco_init; ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw); if (ret) return ret; - snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_out_div_clk", pll_7nm->phy->id); - hw = devm_clk_hw_register_divider(dev, clk_name, - parent, CLK_SET_RATE_PARENT, - pll_7nm->phy->pll_base + - REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, - 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_out_div = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + &pll_7nm->clk_hw, CLK_SET_RATE_PARENT, + pll_7nm->phy->pll_base + + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(pll_out_div)) { + ret = PTR_ERR(pll_out_div); goto fail; } - snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_bit_clk", pll_7nm->phy->id); /* BIT CLK: DIV_CTRL_3_0 */ - hw = devm_clk_hw_register_divider(dev, clk_name, parent, - CLK_SET_RATE_PARENT, - pll_7nm->phy->base + - REG_DSI_7nm_PHY_CMN_CLK_CFG0, - 0, 4, CLK_DIVIDER_ONE_BASED, - &pll_7nm->postdiv_lock); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_bit = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + pll_out_div, CLK_SET_RATE_PARENT, + pll_7nm->phy->base + 
REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock); + if (IS_ERR(pll_bit)) { + ret = PTR_ERR(pll_bit); goto fail; } - snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id); /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - CLK_SET_RATE_PARENT, 1, - pll_7nm->phy->cphy_mode ? 7 : 8); + hw = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name, + pll_bit, CLK_SET_RATE_PARENT, 1, + pll_7nm->phy->cphy_mode ? 7 : 8); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -651,25 +649,25 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide provided_clocks[DSI_BYTE_PLL_CLK] = hw; - snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, - 0, 1, 2); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_by_2_bit = devm_clk_hw_register_fixed_factor_parent_hw(dev, + clk_name, pll_bit, 0, 1, 2); + if (IS_ERR(pll_by_2_bit)) { + ret = PTR_ERR(pll_by_2_bit); goto fail; } - snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); if (pll_7nm->phy->cphy_mode) - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 2, 7); + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw( + dev, clk_name, pll_out_div, 0, 2, 7); else - hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 4); - if (IS_ERR(hw)) { - ret = PTR_ERR(hw); + pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw( + dev, clk_name, pll_out_div, 0, 1, 4); + if (IS_ERR(pll_post_out_div)) { + ret = PTR_ERR(pll_post_out_div); goto fail; } @@ -682,34 +680,32 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3); - snprintf(parent, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id); + phy_pll_out_dsi_parent = pll_post_out_div; } else { - snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); - snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id); - snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id); - - hw = devm_clk_hw_register_mux(dev, clk_name, - ((const char *[]){ - parent, parent2, - }), 2, 0, pll_7nm->phy->base + + snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id); + + hw = devm_clk_hw_register_mux_parent_hws(dev, clk_name, + ((const struct clk_hw *[]){ + pll_bit, + pll_by_2_bit, + }), 2, 0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, - 0, 1, 0, NULL); + 0, 1, 0, NULL); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; } - snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id); + phy_pll_out_dsi_parent = hw; } - snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id); + snprintf(clk_name, sizeof(clk_name), "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id); /* PIX CLK DIV : DIV_CTRL_7_4*/ - hw = devm_clk_hw_register_divider(dev, clk_name, parent, - 0, pll_7nm->phy->base + - 
REG_DSI_7nm_PHY_CMN_CLK_CFG0, - 4, 4, CLK_DIVIDER_ONE_BASED, - &pll_7nm->postdiv_lock); + hw = devm_clk_hw_register_divider_parent_hw(dev, clk_name, + phy_pll_out_dsi_parent, 0, + pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, &pll_7nm->postdiv_lock); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -841,7 +837,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, ret = msm_dsi_dphy_timing_calc_v4(timing, clk_req); if (ret) { DRM_DEV_ERROR(&phy->pdev->dev, - "%s: PHY timing calculation failed\n", __func__); + "%s: PHY timing calculation failed\n", __func__); return -EINVAL; } @@ -960,10 +956,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, - timing->shared_timings.clk_pre); + timing->shared_timings.clk_pre); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, - timing->shared_timings.clk_post); + timing->shared_timings.clk_post); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04); @@ -982,9 +978,9 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12, - timing->shared_timings.clk_pre); + timing->shared_timings.clk_pre); dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13, - timing->shared_timings.clk_post); + timing->shared_timings.clk_post); } /* DSI lane settings */ @@ -1036,14 +1032,18 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy) DBG("DSI%d PHY disabled", phy->id); } +static const struct regulator_bulk_data dsi_phy_7nm_36mA_regulators[] = { + { .supply = "vdds", .init_load_uA = 36000 }, +}; + +static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = { + { .supply = "vdds", .init_load_uA = 37550 }, +}; + const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vdds", 36000, 32}, - }, - }, + .regulator_data = dsi_phy_7nm_36mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators), .ops = { .enable = dsi_7nm_phy_enable, .disable = dsi_7nm_phy_disable, @@ -1065,12 +1065,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vdds", 36000, 32}, - }, - }, + .regulator_data = dsi_phy_7nm_36mA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_36mA_regulators), .ops = { .enable = dsi_7nm_phy_enable, .disable = dsi_7nm_phy_disable, @@ -1087,12 +1083,8 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = { const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = { .has_phy_lane = true, - .reg_cfg = { - .num = 1, - .regs = { - {"vdds", 37550, 0}, - }, - }, + .regulator_data = dsi_phy_7nm_37750uA_regulators, + .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators), .ops = { .enable = dsi_7nm_phy_enable, .disable = dsi_7nm_phy_disable,
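All of the pll_*_register() conversions above share one idea: parents are passed as struct clk_hw pointers through the *_parent_hw()/*_parent_hws() registration helpers instead of being resolved from snprintf-generated global name strings, and because registration copies init.name, a single clk_name buffer can be reused for every clock (hence the snprintf(clk_name, sizeof(clk_name), ...) churn). A minimal sketch of the style; the foo names, register offset and field widths are illustrative only:

#include <linux/clk-provider.h>

static int foo_register_tree(struct device *dev, struct clk_hw *vco,
			     void __iomem *reg, spinlock_t *lock,
			     struct clk_hw **provided)
{
	char clk_name[32];
	struct clk_hw *div, *byte;

	/* init.name is copied at registration, so the buffer is reusable */
	snprintf(clk_name, sizeof(clk_name), "foo_postdiv");
	div = devm_clk_hw_register_divider_parent_hw(dev, clk_name, vco,
			CLK_SET_RATE_PARENT, reg, 0, 4,
			CLK_DIVIDER_ONE_BASED, lock);
	if (IS_ERR(div))
		return PTR_ERR(div);

	/* The parent link is a pointer, not a globally unique name */
	snprintf(clk_name, sizeof(clk_name), "foo_byteclk");
	byte = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk_name,
			div, CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(byte))
		return PTR_ERR(byte);

	provided[0] = byte;
	return 0;
}

The mux variant, devm_clk_hw_register_mux_parent_hws(), takes an array of clk_hw pointers in the same way, as in the dsi%d_pclk_mux and dsi%dbyte_mux hunks above.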
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index b06d9d25a189..4dd055416620 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -691,15 +691,13 @@ static const struct clk_ops hdmi_8996_pll_ops = { .is_enabled = hdmi_8996_pll_is_enabled, }; -static const char * const hdmi_pll_parents[] = { - "xo", -}; - static const struct clk_init_data pll_init = { .name = "hdmipll", .ops = &hdmi_8996_pll_ops, - .parent_names = hdmi_pll_parents, - .num_parents = ARRAY_SIZE(hdmi_pll_parents), + .parent_data = (const struct clk_parent_data[]){ + { .fw_name = "xo", .name = "xo_board" }, + }, + .num_parents = 1, .flags = CLK_IGNORE_UNUSED, }; @@ -707,8 +705,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct hdmi_pll_8996 *pll; - struct clk *clk; - int i; + int i, ret; pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); if (!pll) @@ -735,10 +732,16 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev) } pll->clk_hw.init = &pll_init; - clk = devm_clk_register(dev, &pll->clk_hw); - if (IS_ERR(clk)) { + ret = devm_clk_hw_register(dev, &pll->clk_hw); + if (ret) { DRM_DEV_ERROR(dev, "failed to register pll clock\n"); - return -EINVAL; + return ret; + } + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw); + if (ret) { + DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret); + return ret; } return 0; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 7d2dab260f86..95f4374ae21c 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -7,6 +7,7 @@ #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> +#include <linux/fault-inject.h> #include <drm/drm_debugfs.h> #include <drm/drm_file.h> @@ -326,6 +327,13 @@ void msm_debugfs_init(struct drm_minor *minor) if (priv->kms && priv->kms->funcs->debugfs_init) priv->kms->funcs->debugfs_init(priv->kms, minor); + +#ifdef CONFIG_FAULT_INJECTION + fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root, + &fail_gem_alloc); + fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root, + &fail_gem_iova); +#endif } #endif diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 226d8d4629d2..28034c21f6bc 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -6,6 +6,7 @@ */ #include <linux/dma-mapping.h> +#include <linux/fault-inject.h> #include <linux/kthread.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> @@ -78,6 +79,11 @@ static bool modeset = true; MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)"); module_param(modeset, bool, 0600); +#ifdef CONFIG_FAULT_INJECTION +DECLARE_FAULT_ATTR(fail_gem_alloc); +DECLARE_FAULT_ATTR(fail_gem_iova); +#endif + static irqreturn_t msm_irq(int irq, void *arg) { struct drm_device *dev = arg; 
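fail_gem_alloc and fail_gem_iova wire msm into the kernel's generic fault-injection framework: DECLARE_FAULT_ATTR() defines the tunable attribute, fault_create_debugfs_attr() exposes the standard knobs (probability, interval, times, ...) under the DRM debugfs root, and the should_fail() checks added in the hunks below decide per call whether to inject -ENOMEM. Reduced to a single hypothetical attribute:

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

DECLARE_FAULT_ATTR(fail_foo_alloc);	/* the tunable fault attribute */

static int foo_alloc(size_t size)
{
	/* Injects a failure according to the configured probability/times */
	if (should_fail(&fail_foo_alloc, size))
		return -ENOMEM;

	return 0;	/* real allocation elided */
}

static void foo_debugfs_init(struct dentry *root)
{
	/* Creates fail_foo_alloc/{probability,interval,times,...} knobs */
	fault_create_debugfs_attr("fail_foo_alloc", root, &fail_foo_alloc);
}

With CONFIG_FAULT_INJECTION enabled this lets userspace exercise GEM error paths deterministically, e.g. by writing 100 to probability and -1 to times under /sys/kernel/debug/dri/<minor>/fail_gem_alloc/.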
shrinker: */ fs_reclaim_acquire(GFP_KERNEL); - might_lock(&priv->mm_lock); + might_lock(&priv->lru.lock); fs_reclaim_release(GFP_KERNEL); drm_mode_config_init(ddev); @@ -469,6 +479,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) } } + drm_helper_move_panel_connectors_to_head(ddev); + ddev->mode_config.funcs = &mode_config_funcs; ddev->mode_config.helper_private = &mode_config_helper_funcs; @@ -697,6 +709,9 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data, flags |= MSM_BO_WC; } + if (should_fail(&fail_gem_alloc, args->size)) + return -ENOMEM; + return msm_gem_new_handle(dev, file, args->size, args->flags, &args->handle, NULL); } @@ -758,6 +773,9 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev, if (!priv->gpu) return -EINVAL; + if (should_fail(&fail_gem_iova, obj->size)) + return -ENOMEM; + /* * Don't pin the memory here - just get an address so that userspace can * be productive @@ -779,6 +797,9 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev, if (priv->gpu->aspace == ctx->aspace) return -EOPNOTSUPP; + if (should_fail(&fail_gem_iova, obj->size)) + return -ENOMEM; + return msm_gem_set_iova(obj, ctx->aspace, iova); } @@ -883,13 +904,13 @@ static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, * retired, so if the fence is not found it means there is nothing * to wait for */ - ret = mutex_lock_interruptible(&queue->lock); + ret = mutex_lock_interruptible(&queue->idr_lock); if (ret) return ret; fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); - mutex_unlock(&queue->lock); + mutex_unlock(&queue->idr_lock); if (!fence) return 0; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 7d5fb0fc22dd..b2ea262296a4 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -33,6 +33,13 @@ #include <drm/msm_drm.h> #include <drm/drm_gem.h> +#ifdef CONFIG_FAULT_INJECTION +extern struct fault_attr fail_gem_alloc; +extern struct fault_attr fail_gem_iova; +#else +# define should_fail(attr, size) 0 +#endif + struct msm_kms; struct msm_gpu; struct msm_mmu; @@ -95,11 +102,6 @@ struct msm_drm_thread { struct kthread_worker *worker; }; -/* DSC config */ -struct msm_display_dsc_config { - struct drm_dsc_config *drm; -}; - struct msm_drm_private { struct drm_device *dev; @@ -141,28 +143,60 @@ struct msm_drm_private { struct mutex obj_lock; /** - * LRUs of inactive GEM objects. Every bo is either in one of the - * inactive lists (depending on whether or not it is shrinkable) or - * gpu->active_list (for the gpu it is active on[1]), or transiently - * on a temporary list as the shrinker is running. + * lru: * - * Note that inactive_willneed also contains pinned and vmap'd bos, - * but the number of pinned-but-not-active objects is small (scanout - * buffers, ringbuffer, etc). + * The various LRUs that a GEM object is in at various stages of + * its lifetime. Objects start out in the unbacked LRU. When + * pinned (for scanout or permanently mapped GPU buffers, like + * ringbuffer, memptr, fw, etc) it moves to the pinned LRU. When + * unpinned, it moves into willneed or dontneed LRU depending on + * madvise state. When backing pages are evicted (willneed) or + * purged (dontneed) it moves back into the unbacked LRU. * - * These lists are protected by mm_lock (which should be acquired - * before per GEM object lock). One should *not* hold mm_lock in - * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
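The should_fail() checks above follow the stock kernel fault-injection pattern: the attrs are defined once in msm_drv.c, surfaced under the DRM minor's debugfs directory by the msm_debugfs.c hunk, and the CONFIG_FAULT_INJECTION=n stub in msm_drv.h turns every call site into a constant 0. A minimal self-contained sketch of the same pattern, using a hypothetical fail_demo_op attr that is not part of this patch:

#include <linux/fault-inject.h>

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(fail_demo_op);
#else
/* Mirrors the msm_drv.h stub: the macro discards its arguments, so the
 * attr symbol is never referenced when fault injection is disabled. */
#define should_fail(attr, size) 0
#endif

static int demo_op(size_t size)
{
	/* Trips according to the knobs that fault_create_debugfs_attr()
	 * exposes (probability, interval, times, verbose, ...). */
	if (should_fail(&fail_demo_op, size))
		return -ENOMEM;

	return 0; /* real work here */
}

Once registered, the knobs are runtime-tunable, so for example setting probability to 100 and times to -1 under the attr's debugfs directory makes every gated call fail with -ENOMEM without rebuilding the driver.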
- * - * [1] if someone ever added support for the old 2d cores, there could be - * more than one gpu object + * The dontneed LRU is considered by the shrinker for objects + * that are candidates for purging, and the willneed LRU is + * considered for objects that could be evicted. */ - struct list_head inactive_willneed; /* inactive + potentially unpin/evictable */ - struct list_head inactive_dontneed; /* inactive + shrinkable */ - struct list_head inactive_unpinned; /* inactive + purged or unpinned */ - long shrinkable_count; /* write access under mm_lock */ - long evictable_count; /* write access under mm_lock */ - struct mutex mm_lock; + struct { + /** + * unbacked: + * + * The LRU for GEM objects without backing pages allocated. + * This mostly exists so that objects are always in one + * LRU. + */ + struct drm_gem_lru unbacked; + + /** + * pinned: + * + * The LRU for pinned GEM objects + */ + struct drm_gem_lru pinned; + + /** + * willneed: + * + * The LRU for unpinned GEM objects which are in madvise + * WILLNEED state (ie. can be evicted) + */ + struct drm_gem_lru willneed; + + /** + * dontneed: + * + * The LRU for unpinned GEM objects which are in madvise + * DONTNEED state (ie. can be purged) + */ + struct drm_gem_lru dontneed; + + /** + * lock: + * + * Protects manipulation of all of the LRUs. + */ + struct mutex lock; + } lru; struct workqueue_struct *wq; @@ -289,7 +323,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi); bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi); bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi); -struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi); +struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi); #else static inline void __init msm_dsi_register(void) { @@ -319,7 +353,7 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi) return false; } -static inline struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) +static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi) { return NULL; } @@ -432,6 +466,8 @@ void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name, phys_addr_t *size); void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name); +struct icc_path *msm_icc_get(struct device *dev, const char *name); + #define msm_writel(data, addr) writel((data), (addr)) #define msm_readl(addr) readl((addr)) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 8ddbd2e001d4..1dee0d18abbb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -19,7 +19,7 @@ #include "msm_gpu.h" #include "msm_mmu.h" -static void update_inactive(struct msm_gem_object *msm_obj); +static void update_lru(struct drm_gem_object *obj); static dma_addr_t physaddr(struct drm_gem_object *obj) { @@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); if (!msm_obj->pages) { struct drm_device *dev = obj->dev; @@ -132,7 +132,7 @@ static struct page **get_pages(struct drm_gem_object *obj) if (msm_obj->flags & MSM_BO_WC) sync_for_device(msm_obj); - update_inactive(msm_obj); + update_lru(obj); } return msm_obj->pages; @@ -174,40 +174,45 @@ static void put_pages(struct drm_gem_object *obj) put_pages_vram(obj); msm_obj->pages = NULL; + update_lru(obj); } } -struct page
**msm_gem_get_pages(struct drm_gem_object *obj) +static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct page **p; - msm_gem_lock(obj); + msm_gem_assert_locked(obj); if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { - msm_gem_unlock(obj); return ERR_PTR(-EBUSY); } p = get_pages(obj); - if (!IS_ERR(p)) { - msm_obj->pin_count++; - update_inactive(msm_obj); + to_msm_bo(obj)->pin_count++; + update_lru(obj); } - msm_gem_unlock(obj); return p; } -void msm_gem_put_pages(struct drm_gem_object *obj) +struct page **msm_gem_pin_pages(struct drm_gem_object *obj) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **p; msm_gem_lock(obj); - msm_obj->pin_count--; - GEM_WARN_ON(msm_obj->pin_count < 0); - update_inactive(msm_obj); + p = msm_gem_pin_pages_locked(obj); + msm_gem_unlock(obj); + + return p; +} + +void msm_gem_unpin_pages(struct drm_gem_object *obj) +{ + msm_gem_lock(obj); + msm_gem_unpin_locked(obj); msm_gem_unlock(obj); } @@ -273,7 +278,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; int ret; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); /* Make it mmapable */ ret = drm_gem_create_mmap_offset(obj); @@ -302,7 +307,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) @@ -321,7 +326,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); list_for_each_entry(vma, &msm_obj->vmas, list) { if (vma->aspace == aspace) @@ -352,7 +357,7 @@ put_iova_spaces(struct drm_gem_object *obj, bool close) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); list_for_each_entry(vma, &msm_obj->vmas, list) { if (vma->aspace) { @@ -370,7 +375,7 @@ put_iova_vmas(struct drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma, *tmp; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { del_vma(vma); @@ -383,7 +388,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, { struct msm_gem_vma *vma; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); vma = lookup_vma(obj, aspace); @@ -423,19 +428,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) if (msm_obj->flags & MSM_BO_CACHED_COHERENT) prot |= IOMMU_CACHE; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) return -EBUSY; - pages = get_pages(obj); + pages = msm_gem_pin_pages_locked(obj); if (IS_ERR(pages)) return PTR_ERR(pages); ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size); - - if (!ret) - msm_obj->pin_count++; + if (ret) + msm_gem_unpin_locked(obj); return ret; } @@ -444,12 +448,12 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); msm_obj->pin_count--; GEM_WARN_ON(msm_obj->pin_count < 0); - update_inactive(msm_obj); + update_lru(obj); } struct 
msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, @@ -465,7 +469,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma; int ret; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); vma = get_vma_locked(obj, aspace, range_start, range_end); if (IS_ERR(vma)) @@ -626,7 +630,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) struct msm_gem_object *msm_obj = to_msm_bo(obj); int ret = 0; - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); if (obj->import_attach) return ERR_PTR(-ENODEV); @@ -658,7 +662,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv) goto fail; } - update_inactive(msm_obj); + update_lru(obj); } return msm_obj->vaddr; @@ -699,7 +703,7 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); GEM_WARN_ON(msm_obj->vmap_count < 1); msm_obj->vmap_count--; @@ -729,8 +733,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv) /* If the obj is inactive, we might need to move it * between inactive lists */ - if (msm_obj->active_count == 0) - update_inactive(msm_obj); + update_lru(obj); msm_gem_unlock(obj); @@ -742,7 +745,7 @@ void msm_gem_purge(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); GEM_WARN_ON(!is_purgeable(msm_obj)); /* Get rid of any iommu mapping(s): */ @@ -757,7 +760,6 @@ void msm_gem_purge(struct drm_gem_object *obj) put_iova_vmas(obj); msm_obj->madv = __MSM_MADV_PURGED; - update_inactive(msm_obj); drm_gem_free_mmap_offset(obj); @@ -780,10 +782,8 @@ void msm_gem_evict(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); GEM_WARN_ON(is_unevictable(msm_obj)); - GEM_WARN_ON(!msm_obj->evictable); - GEM_WARN_ON(msm_obj->active_count); /* Get rid of any iommu mapping(s): */ put_iova_spaces(obj, false); @@ -791,15 +791,13 @@ void msm_gem_evict(struct drm_gem_object *obj) drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); put_pages(obj); - - update_inactive(msm_obj); } void msm_gem_vunmap(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(obj); if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) return; @@ -808,66 +806,37 @@ void msm_gem_vunmap(struct drm_gem_object *obj) msm_obj->vaddr = NULL; } -void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) +static void update_lru(struct drm_gem_object *obj) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_drm_private *priv = obj->dev->dev_private; - - might_sleep(); - GEM_WARN_ON(!msm_gem_is_locked(obj)); - GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); - GEM_WARN_ON(msm_obj->dontneed); - - if (msm_obj->active_count++ == 0) { - mutex_lock(&priv->mm_lock); - if (msm_obj->evictable) - mark_unevictable(msm_obj); - list_move_tail(&msm_obj->mm_list, &gpu->active_list); - mutex_unlock(&priv->mm_lock); - } -} - -void msm_gem_active_put(struct drm_gem_object *obj) -{ struct msm_gem_object *msm_obj = to_msm_bo(obj); - might_sleep(); - GEM_WARN_ON(!msm_gem_is_locked(obj)); + msm_gem_assert_locked(&msm_obj->base); - if (--msm_obj->active_count == 0) { - 
update_inactive(msm_obj); + if (!msm_obj->pages) { + GEM_WARN_ON(msm_obj->pin_count); + GEM_WARN_ON(msm_obj->vmap_count); + + drm_gem_lru_move_tail(&priv->lru.unbacked, obj); + } else if (msm_obj->pin_count || msm_obj->vmap_count) { + drm_gem_lru_move_tail(&priv->lru.pinned, obj); + } else if (msm_obj->madv == MSM_MADV_WILLNEED) { + drm_gem_lru_move_tail(&priv->lru.willneed, obj); + } else { + GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED); + + drm_gem_lru_move_tail(&priv->lru.dontneed, obj); } } -static void update_inactive(struct msm_gem_object *msm_obj) +bool msm_gem_active(struct drm_gem_object *obj) { - struct msm_drm_private *priv = msm_obj->base.dev->dev_private; - - GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); + msm_gem_assert_locked(obj); - if (msm_obj->active_count != 0) - return; - - mutex_lock(&priv->mm_lock); - - if (msm_obj->dontneed) - mark_unpurgeable(msm_obj); - if (msm_obj->evictable) - mark_unevictable(msm_obj); - - list_del(&msm_obj->mm_list); - if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) { - list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); - mark_evictable(msm_obj); - } else if (msm_obj->madv == MSM_MADV_DONTNEED) { - list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); - mark_purgeable(msm_obj); - } else { - GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt); - list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); - } + if (to_msm_bo(obj)->pin_count) + return true; - mutex_unlock(&priv->mm_lock); + return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true)); } int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) @@ -910,7 +879,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, stats->all.count++; stats->all.size += obj->size; - if (is_active(msm_obj)) { + if (msm_gem_active(obj)) { stats->active.count++; stats->active.size += obj->size; } @@ -938,7 +907,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, } seq_printf(m, "%08x: %c %2d (%2d) %08llx %p", - msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', + msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I', obj->name, kref_read(&obj->refcount), off, msm_obj->vaddr); @@ -1015,15 +984,6 @@ static void msm_gem_free_object(struct drm_gem_object *obj) list_del(&msm_obj->node); mutex_unlock(&priv->obj_lock); - mutex_lock(&priv->mm_lock); - if (msm_obj->dontneed) - mark_unpurgeable(msm_obj); - list_del(&msm_obj->mm_list); - mutex_unlock(&priv->mm_lock); - - /* object should not be on active list: */ - GEM_WARN_ON(is_active(msm_obj)); - put_iova_spaces(obj, true); if (obj->import_attach) { @@ -1183,13 +1143,6 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 to_msm_bo(obj)->vram_node = &vma->node; - /* Call chain get_pages() -> update_inactive() tries to - * access msm_obj->mm_list, but it is not initialized yet. - * To avoid NULL pointer dereference error, initialize - * mm_list to be empty. 
- */ - INIT_LIST_HEAD(&msm_obj->mm_list); - msm_gem_lock(obj); pages = get_pages(obj); msm_gem_unlock(obj); @@ -1212,9 +1165,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); } - mutex_lock(&priv->mm_lock); - list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); - mutex_unlock(&priv->mm_lock); + drm_gem_lru_move_tail(&priv->lru.unbacked, obj); mutex_lock(&priv->obj_lock); list_add_tail(&msm_obj->node, &priv->objects); @@ -1270,9 +1221,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, msm_gem_unlock(obj); - mutex_lock(&priv->mm_lock); - list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned); - mutex_unlock(&priv->mm_lock); + drm_gem_lru_move_tail(&priv->lru.pinned, obj); mutex_lock(&priv->obj_lock); list_add_tail(&msm_obj->node, &priv->objects); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 432032ad4aed..c4844cf3a585 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -94,16 +94,6 @@ struct msm_gem_object { uint8_t madv; /** - * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)? - */ - bool dontneed : 1; - - /** - * Is object evictable (ie. counted in priv->evictable_count)? - */ - bool evictable : 1; - - /** * count of active vmap'ing */ uint8_t vmap_count; @@ -114,17 +104,6 @@ struct msm_gem_object { */ struct list_head node; - /** - * An object is either: - * inactive - on priv->inactive_dontneed or priv->inactive_willneed - * (depending on purgeability status) - * active - on one one of the gpu's active_list.. well, at - * least for now we don't have (I don't think) hw sync between - * 2d and 3d one devices which have both, meaning we need to - * block on submit if a bo is already on other ring - */ - struct list_head mm_list; - struct page **pages; struct sg_table *sgt; void *vaddr; @@ -138,7 +117,6 @@ struct msm_gem_object { char name[32]; /* Identifier to print for the debugfs files */ - int active_count; int pin_count; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) @@ -159,8 +137,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova); void msm_gem_unpin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); -struct page **msm_gem_get_pages(struct drm_gem_object *obj); -void msm_gem_put_pages(struct drm_gem_object *obj); +struct page **msm_gem_pin_pages(struct drm_gem_object *obj); +void msm_gem_unpin_pages(struct drm_gem_object *obj); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, @@ -171,8 +149,7 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj); void msm_gem_put_vaddr_locked(struct drm_gem_object *obj); void msm_gem_put_vaddr(struct drm_gem_object *obj); int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv); -void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu); -void msm_gem_active_put(struct drm_gem_object *obj); +bool msm_gem_active(struct drm_gem_object *obj); int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout); int msm_gem_cpu_fini(struct drm_gem_object *obj); int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, @@ -208,12 +185,6 @@ msm_gem_lock(struct drm_gem_object *obj) dma_resv_lock(obj->resv, NULL); } -static inline bool __must_check 
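The pin_pages rework earlier in msm_gem.c splits the API into a _locked core plus wrappers that take the dma_resv lock through the msm_gem_lock()/msm_gem_unlock() helpers here, with msm_gem_assert_locked() enforcing the convention. A sketch of the calling pattern, assuming a hypothetical caller inside msm_gem.c (msm_gem_pin_pages_locked() is file-local, so this is illustrative only):

/* Illustrative only: unlocked entry point wrapping the _locked helper. */
static int demo_access_pages(struct drm_gem_object *obj)
{
	struct page **pages;
	int ret = 0;

	msm_gem_lock(obj);			/* dma_resv_lock(obj->resv, NULL) */

	pages = msm_gem_pin_pages_locked(obj);	/* bumps pin_count, updates LRU */
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
	} else {
		/* ... safely touch pages[] while pinned ... */
		msm_gem_unpin_locked(obj);	/* drops pin_count, updates LRU */
	}

	msm_gem_unlock(obj);
	return ret;
}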
-msm_gem_trylock(struct drm_gem_object *obj) -{ - return dma_resv_trylock(obj->resv); -} - static inline int msm_gem_lock_interruptible(struct drm_gem_object *obj) { @@ -226,8 +197,8 @@ msm_gem_unlock(struct drm_gem_object *obj) dma_resv_unlock(obj->resv); } -static inline bool -msm_gem_is_locked(struct drm_gem_object *obj) +static inline void +msm_gem_assert_locked(struct drm_gem_object *obj) { /* * Destroying the object is a special case.. msm_gem_free_object() @@ -241,13 +212,10 @@ msm_gem_is_locked(struct drm_gem_object *obj) * Unfortunately lockdep is not aware of this detail. So when the * refcount drops to zero, we pretend it is already locked. */ - return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0); -} - -static inline bool is_active(struct msm_gem_object *msm_obj) -{ - GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); - return msm_obj->active_count; + lockdep_assert_once( + (kref_read(&obj->refcount) == 0) || + (lockdep_is_held(&obj->resv->lock.base) != LOCK_STATE_NOT_HELD) + ); } /* imported/exported objects are not purgeable: */ @@ -264,81 +232,15 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj) static inline bool is_vunmapable(struct msm_gem_object *msm_obj) { - GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); + msm_gem_assert_locked(&msm_obj->base); return (msm_obj->vmap_count == 0) && msm_obj->vaddr; } -static inline void mark_purgeable(struct msm_gem_object *msm_obj) -{ - struct msm_drm_private *priv = msm_obj->base.dev->dev_private; - - GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock)); - - if (is_unpurgeable(msm_obj)) - return; - - if (GEM_WARN_ON(msm_obj->dontneed)) - return; - - priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT; - msm_obj->dontneed = true; -} - -static inline void mark_unpurgeable(struct msm_gem_object *msm_obj) -{ - struct msm_drm_private *priv = msm_obj->base.dev->dev_private; - - GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock)); - - if (is_unpurgeable(msm_obj)) - return; - - if (GEM_WARN_ON(!msm_obj->dontneed)) - return; - - priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT; - GEM_WARN_ON(priv->shrinkable_count < 0); - msm_obj->dontneed = false; -} - static inline bool is_unevictable(struct msm_gem_object *msm_obj) { return is_unpurgeable(msm_obj) || msm_obj->vaddr; } -static inline void mark_evictable(struct msm_gem_object *msm_obj) -{ - struct msm_drm_private *priv = msm_obj->base.dev->dev_private; - - WARN_ON(!mutex_is_locked(&priv->mm_lock)); - - if (is_unevictable(msm_obj)) - return; - - if (WARN_ON(msm_obj->evictable)) - return; - - priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT; - msm_obj->evictable = true; -} - -static inline void mark_unevictable(struct msm_gem_object *msm_obj) -{ - struct msm_drm_private *priv = msm_obj->base.dev->dev_private; - - WARN_ON(!mutex_is_locked(&priv->mm_lock)); - - if (is_unevictable(msm_obj)) - return; - - if (WARN_ON(!msm_obj->evictable)) - return; - - priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT; - WARN_ON(priv->evictable_count < 0); - msm_obj->evictable = false; -} - void msm_gem_purge(struct drm_gem_object *obj); void msm_gem_evict(struct drm_gem_object *obj); void msm_gem_vunmap(struct drm_gem_object *obj); @@ -390,9 +292,8 @@ struct msm_gem_submit { /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ #define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? 
*/ #define BO_LOCKED 0x4000 /* obj lock is held */ -#define BO_ACTIVE 0x2000 /* active refcnt is held */ -#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */ -#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */ +#define BO_OBJ_PINNED 0x2000 /* obj (pages) is pinned and on active list */ +#define BO_VMA_PINNED 0x1000 /* vma (virtual address) is pinned */ uint32_t flags; union { struct msm_gem_object *obj; diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index dcc8a573bc76..c1d91863df05 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -63,12 +63,12 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, int msm_gem_prime_pin(struct drm_gem_object *obj) { if (!obj->import_attach) - msm_gem_get_pages(obj); + msm_gem_pin_pages(obj); return 0; } void msm_gem_prime_unpin(struct drm_gem_object *obj) { if (!obj->import_attach) - msm_gem_put_pages(obj); + msm_gem_unpin_pages(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 0317055e3253..1de14e67f96b 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -24,103 +24,77 @@ static bool can_swap(void) return enable_eviction && get_nr_swap_pages() > 0; } +static bool can_block(struct shrink_control *sc) +{ + if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM)) + return false; + return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM); +} + static unsigned long msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - unsigned count = priv->shrinkable_count; + unsigned count = priv->lru.dontneed.count; if (can_swap()) - count += priv->evictable_count; + count += priv->lru.willneed.count; return count; } static bool -purge(struct msm_gem_object *msm_obj) +purge(struct drm_gem_object *obj) { - if (!is_purgeable(msm_obj)) + if (!is_purgeable(to_msm_bo(obj))) return false; - /* - * This will move the obj out of still_in_list to - * the purged list - */ - msm_gem_purge(&msm_obj->base); + if (msm_gem_active(obj)) + return false; + + msm_gem_purge(obj); return true; } static bool -evict(struct msm_gem_object *msm_obj) +evict(struct drm_gem_object *obj) { - if (is_unevictable(msm_obj)) + if (is_unevictable(to_msm_bo(obj))) + return false; + + if (msm_gem_active(obj)) return false; - msm_gem_evict(&msm_obj->base); + msm_gem_evict(obj); return true; } -static unsigned long -scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list, - bool (*shrink)(struct msm_gem_object *msm_obj)) +static bool +wait_for_idle(struct drm_gem_object *obj) { - unsigned freed = 0; - struct list_head still_in_list; - - INIT_LIST_HEAD(&still_in_list); - - mutex_lock(&priv->mm_lock); - - while (freed < nr_to_scan) { - struct msm_gem_object *msm_obj = list_first_entry_or_null( - list, typeof(*msm_obj), mm_list); - - if (!msm_obj) - break; - - list_move_tail(&msm_obj->mm_list, &still_in_list); - - /* - * If it is in the process of being freed, msm_gem_free_object - * can be blocked on mm_lock waiting to remove it. So just - * skip it. 
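purge() and evict() above now derive "still active" from the object's dma_resv instead of a driver-maintained active_count. The one subtlety is dma_resv_usage_rw(): it intentionally inverts its argument, because a new write must wait for existing readers and writers, while a new read only needs to wait for writers. A small sketch of that idiom (hypothetical helper, not from the patch):

#include <linux/dma-resv.h>

/*
 * Illustrative: is the object idle for a prospective access?
 * dma_resv_usage_rw(true) == DMA_RESV_USAGE_READ, which covers existing
 * readers and writers (this is what msm_gem_active() queries), while
 * dma_resv_usage_rw(false) == DMA_RESV_USAGE_WRITE, so a reader only
 * waits for writers.
 */
static bool demo_bo_idle_for(struct drm_gem_object *obj, bool write)
{
	return dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(write));
}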
- */ - if (!kref_get_unless_zero(&msm_obj->base.refcount)) - continue; - - /* - * Now that we own a reference, we can drop mm_lock for the - * rest of the loop body, to reduce contention with the - * retire_submit path (which could make more objects purgeable) - */ - - mutex_unlock(&priv->mm_lock); - - /* - * Note that this still needs to be trylock, since we can - * hit shrinker in response to trying to get backing pages - * for this obj (ie. while it's lock is already held) - */ - if (!msm_gem_trylock(&msm_obj->base)) - goto tail; - - if (shrink(msm_obj)) - freed += msm_obj->base.size >> PAGE_SHIFT; + enum dma_resv_usage usage = dma_resv_usage_rw(true); + return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0; +} - msm_gem_unlock(&msm_obj->base); +static bool +active_purge(struct drm_gem_object *obj) +{ + if (!wait_for_idle(obj)) + return false; -tail: - drm_gem_object_put(&msm_obj->base); - mutex_lock(&priv->mm_lock); - } + return purge(obj); +} - list_splice_tail(&still_in_list, list); - mutex_unlock(&priv->mm_lock); +static bool +active_evict(struct drm_gem_object *obj) +{ + if (!wait_for_idle(obj)) + return false; - return freed; + return evict(obj); } static unsigned long @@ -128,21 +102,34 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - unsigned long freed; - - freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge); - - if (freed > 0) - trace_msm_gem_purge(freed << PAGE_SHIFT); - - if (can_swap() && freed < sc->nr_to_scan) { - int evicted = scan(priv, sc->nr_to_scan - freed, - &priv->inactive_willneed, evict); + struct { + struct drm_gem_lru *lru; + bool (*shrink)(struct drm_gem_object *obj); + bool cond; + unsigned long freed; + } stages[] = { + /* Stages of progressively more aggressive/expensive reclaim: */ + { &priv->lru.dontneed, purge, true }, + { &priv->lru.willneed, evict, can_swap() }, + { &priv->lru.dontneed, active_purge, can_block(sc) }, + { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) }, + }; + long nr = sc->nr_to_scan; + unsigned long freed = 0; - if (evicted > 0) - trace_msm_gem_evict(evicted << PAGE_SHIFT); + for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) { + if (!stages[i].cond) + continue; + stages[i].freed = + drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink); + nr -= stages[i].freed; + freed += stages[i].freed; + } - freed += evicted; + if (freed) { + trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed, + stages[1].freed, stages[2].freed, + stages[3].freed); } return (freed > 0) ? freed : SHRINK_STOP; @@ -173,12 +160,12 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan) static const int vmap_shrink_limit = 15; static bool -vmap_shrink(struct msm_gem_object *msm_obj) +vmap_shrink(struct drm_gem_object *obj) { - if (!is_vunmapable(msm_obj)) + if (!is_vunmapable(to_msm_bo(obj))) return false; - msm_gem_vunmap(&msm_obj->base); + msm_gem_vunmap(obj); return true; } @@ -188,17 +175,18 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) { struct msm_drm_private *priv = container_of(nb, struct msm_drm_private, vmap_notifier); - struct list_head *mm_lists[] = { - &priv->inactive_dontneed, - &priv->inactive_willneed, - priv->gpu ? 
&priv->gpu->active_list : NULL, + struct drm_gem_lru *lrus[] = { + &priv->lru.dontneed, + &priv->lru.willneed, + &priv->lru.pinned, NULL, }; unsigned idx, unmapped = 0; - for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) { - unmapped += scan(priv, vmap_shrink_limit - unmapped, - mm_lists[idx], vmap_shrink); + for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { + unmapped += drm_gem_lru_scan(lrus[idx], + vmap_shrink_limit - unmapped, + vmap_shrink); } *(unsigned long *)ptr += unmapped; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index c9e4aeb14f4a..5599d93ec0d2 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -26,6 +26,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu_submitqueue *queue, uint32_t nr_bos, uint32_t nr_cmds) { + static atomic_t ident = ATOMIC_INIT(0); struct msm_gem_submit *submit; uint64_t sz; int ret; @@ -36,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (sz > SIZE_MAX) return ERR_PTR(-ENOMEM); - submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + submit = kzalloc(sz, GFP_KERNEL); if (!submit) return ERR_PTR(-ENOMEM); @@ -52,9 +53,13 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->gpu = gpu; submit->cmd = (void *)&submit->bos[nr_bos]; submit->queue = queue; + submit->pid = get_pid(task_pid(current)); submit->ring = gpu->rb[queue->ring_nr]; submit->fault_dumped = false; + /* Get a unique identifier for the submission for logging purposes */ + submit->ident = atomic_inc_return(&ident) - 1; + INIT_LIST_HEAD(&submit->node); return submit; @@ -67,9 +72,9 @@ void __msm_gem_submit_destroy(struct kref *kref) unsigned i; if (submit->fence_id) { - mutex_lock(&submit->queue->lock); + mutex_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); - mutex_unlock(&submit->queue->lock); + mutex_unlock(&submit->queue->idr_lock); } dma_fence_put(submit->user_fence); @@ -238,17 +243,13 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, if (flags & BO_OBJ_PINNED) msm_gem_unpin_locked(obj); - if (flags & BO_ACTIVE) - msm_gem_active_put(obj); - if (flags & BO_LOCKED) dma_resv_unlock(obj->resv); } static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) { - unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | - BO_ACTIVE | BO_LOCKED; + unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED; submit_cleanup_bo(submit, i, cleanup_flags); if (!(submit->bos[i].flags & BO_VALID)) @@ -353,18 +354,6 @@ static int submit_pin_objects(struct msm_gem_submit *submit) submit->valid = true; - /* - * Increment active_count first, so if under memory pressure, we - * don't inadvertently evict a bo needed by the submit in order - * to pin an earlier bo in the same submit. 
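Both the staged shrinker scan and this vmap purge now funnel through drm_gem_lru_scan(), which walks the LRU, trylocks each object, calls the callback with the object lock held, and credits obj->size >> PAGE_SHIFT for every object the callback shrinks. A sketch of a callback under that contract, assuming the 6.0-era three-argument signature of drm_gem_lru_scan():

/* Illustrative drm_gem_lru_scan() callback: the scan loop has already
 * locked the object; return true only if pages were actually released. */
static bool demo_shrink(struct drm_gem_object *obj)
{
	if (to_msm_bo(obj)->madv != MSM_MADV_DONTNEED)
		return false;		/* skipped, object stays in the LRU */

	msm_gem_purge(obj);		/* drop backing pages */
	return true;			/* counted toward the freed total */
}

/* usage: freed = drm_gem_lru_scan(&priv->lru.dontneed, nr_to_scan, demo_shrink); */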
- */ - for (i = 0; i < submit->nr_bos; i++) { - struct drm_gem_object *obj = &submit->bos[i].obj->base; - - msm_gem_active_get(obj, submit->gpu); - submit->bos[i].flags |= BO_ACTIVE; - } - for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; struct msm_gem_vma *vma; @@ -512,11 +501,11 @@ out: */ static void submit_cleanup(struct msm_gem_submit *submit, bool error) { - unsigned cleanup_flags = BO_LOCKED; + unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED; unsigned i; if (error) - cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE; + cleanup_flags |= BO_VMA_PINNED; for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; @@ -533,10 +522,6 @@ void msm_submit_retire(struct msm_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = &submit->bos[i].obj->base; - msm_gem_lock(obj); - /* Note, VMA already fence-unpinned before submit: */ - submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE); - msm_gem_unlock(obj); drm_gem_object_put(obj); } } @@ -718,7 +703,6 @@ static void msm_process_post_deps(struct msm_submit_post_dep *post_deps, int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file) { - static atomic_t ident = ATOMIC_INIT(0); struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; @@ -729,10 +713,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_submit_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; int out_fence_fd = -1; - struct pid *pid = get_pid(task_pid(current)); bool has_ww_ticket = false; unsigned i; - int ret, submitid; + int ret; if (!gpu) return -ENXIO; @@ -764,35 +747,26 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!queue) return -ENOENT; - /* Get a unique identifier for the submission for logging purposes */ - submitid = atomic_inc_return(&ident) - 1; - ring = gpu->rb[queue->ring_nr]; - trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid, - args->nr_bos, args->nr_cmds); - - ret = mutex_lock_interruptible(&queue->lock); - if (ret) - goto out_post_unlock; if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { ret = out_fence_fd; - goto out_unlock; + return ret; } } - submit = submit_create(dev, gpu, queue, args->nr_bos, - args->nr_cmds); - if (IS_ERR(submit)) { - ret = PTR_ERR(submit); - submit = NULL; - goto out_unlock; - } + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); + if (IS_ERR(submit)) + return PTR_ERR(submit); + + trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident, + args->nr_bos, args->nr_cmds); - submit->pid = pid; - submit->ident = submitid; + ret = mutex_lock_interruptible(&queue->lock); + if (ret) + goto out_post_unlock; if (args->flags & MSM_SUBMIT_SUDO) submit->in_rb = true; @@ -887,6 +861,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->nr_cmds = i; + mutex_lock(&queue->idr_lock); + /* * If using userspace provided seqno fence, validate that the id * is available before arming sched job. 
Since access to fence_idr @@ -895,6 +871,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, */ if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) && idr_find(&queue->fence_idr, args->fence)) { + mutex_unlock(&queue->idr_lock); ret = -EINVAL; goto out; } @@ -927,6 +904,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit->user_fence, 1, INT_MAX, GFP_KERNEL); } + + mutex_unlock(&queue->idr_lock); + if (submit->fence_id < 0) { ret = submit->fence_id; submit->fence_id = 0; @@ -965,9 +945,9 @@ out_unlock: if (ret && (out_fence_fd >= 0)) put_unused_fd(out_fence_fd); mutex_unlock(&queue->lock); +out_post_unlock: if (submit) msm_gem_submit_put(submit); -out_post_unlock: if (!IS_ERR_OR_NULL(post_deps)) { for (i = 0; i < args->nr_out_syncobjs; ++i) { kfree(post_deps[i].chain); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index c2bfcf3f1f40..0098ee8438aa 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -16,6 +16,7 @@ #include <generated/utsrelease.h> #include <linux/string_helpers.h> #include <linux/devcoredump.h> +#include <linux/reset.h> #include <linux/sched/task.h> /* @@ -394,7 +395,6 @@ static void recover_worker(struct kthread_work *work) /* Record the crash state */ pm_runtime_get_sync(&gpu->pdev->dev); msm_gpu_crashstate_capture(gpu, submit, comm, cmd); - pm_runtime_put_sync(&gpu->pdev->dev); kfree(cmd); kfree(comm); @@ -423,9 +423,7 @@ static void recover_worker(struct kthread_work *work) /* retire completed submits, plus the one that hung: */ retire_submits(gpu); - pm_runtime_get_sync(&gpu->pdev->dev); gpu->funcs->recover(gpu); - pm_runtime_put_sync(&gpu->pdev->dev); /* * Replay all remaining submits starting with highest priority @@ -442,6 +440,8 @@ static void recover_worker(struct kthread_work *work) } } + pm_runtime_put(&gpu->pdev->dev); + mutex_unlock(&gpu->lock); msm_gpu_retire(gpu); @@ -664,11 +664,12 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, mutex_lock(&gpu->active_lock); gpu->active_submits--; WARN_ON(gpu->active_submits < 0); - if (!gpu->active_submits) + if (!gpu->active_submits) { msm_devfreq_idle(gpu); - mutex_unlock(&gpu->active_lock); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + } - pm_runtime_put_autosuspend(&gpu->pdev->dev); + mutex_unlock(&gpu->active_lock); msm_gem_submit_put(submit); } @@ -757,14 +758,17 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* Update devfreq on transition from idle->active: */ mutex_lock(&gpu->active_lock); - if (!gpu->active_submits) + if (!gpu->active_submits) { + pm_runtime_get(&gpu->pdev->dev); msm_devfreq_active(gpu); + } gpu->active_submits++; mutex_unlock(&gpu->active_lock); gpu->funcs->submit(gpu, submit); gpu->cur_ctx_seqno = submit->queue->ctx->seqno; + pm_runtime_put(&gpu->pdev->dev); hangcheck_timer_reset(gpu); } @@ -846,7 +850,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, sched_set_fifo_low(gpu->worker->task); - INIT_LIST_HEAD(&gpu->active_list); mutex_init(&gpu->active_lock); mutex_init(&gpu->lock); init_waitqueue_head(&gpu->retire_event); @@ -901,6 +904,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL; + gpu->cx_collapse = devm_reset_control_get_optional_exclusive(&pdev->dev, + "cx_collapse"); + gpu->pdev = pdev; platform_set_drvdata(pdev, &gpu->adreno_smmu); @@ -974,8 +980,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) DBG("%s", gpu->name); - 
WARN_ON(!list_empty(&gpu->active_list)); - for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { msm_ringbuffer_destroy(gpu->rb[i]); gpu->rb[i] = NULL; diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 4d935fedd2ac..ff911e7305ce 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -13,6 +13,7 @@ #include <linux/interconnect.h> #include <linux/pm_opp.h> #include <linux/regulator/consumer.h> +#include <linux/reset.h> #include "msm_drv.h" #include "msm_fence.h" @@ -187,12 +188,6 @@ struct msm_gpu { */ int cur_ctx_seqno; - /* - * List of GEM active objects on this gpu. Protected by - * msm_drm_private::mm_lock - */ - struct list_head active_list; - /** * lock: * @@ -277,6 +272,9 @@ struct msm_gpu { bool hw_apriv; struct thermal_cooling_device *cooling; + + /* To poll for cx gdsc collapse during gpu recovery */ + struct reset_control *cx_collapse; }; static inline struct msm_gpu *dev_to_gpu(struct device *dev) @@ -466,7 +464,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * @node: node in the context's list of submitqueues * @fence_idr: maps fence-id to dma_fence for userspace visible fence * seqno, protected by submitqueue lock - * @lock: submitqueue lock + * @idr_lock: for serializing access to fence_idr + * @lock: submitqueue lock for serializing submits on a queue * @ref: reference count * @entity: the submit job-queue */ @@ -479,6 +478,7 @@ struct msm_gpu_submitqueue { struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; + struct mutex idr_lock; struct mutex lock; struct kref ref; struct drm_sched_entity *entity; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index d1f70426f554..85c443a37e4e 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu) if (IS_ERR(df->devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); + dev_pm_qos_remove_request(&df->idle_freq); + dev_pm_qos_remove_request(&df->boost_freq); df->devfreq = NULL; return; } diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h index ca0b08d7875b..ac40d857bc45 100644 --- a/drivers/gpu/drm/msm/msm_gpu_trace.h +++ b/drivers/gpu/drm/msm/msm_gpu_trace.h @@ -115,29 +115,27 @@ TRACE_EVENT(msm_gmu_freq_change, ); -TRACE_EVENT(msm_gem_purge, - TP_PROTO(u32 bytes), - TP_ARGS(bytes), +TRACE_EVENT(msm_gem_shrink, + TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted, + u32 active_purged, u32 active_evicted), + TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted), TP_STRUCT__entry( - __field(u32, bytes) + __field(u32, nr_to_scan) + __field(u32, purged) + __field(u32, evicted) + __field(u32, active_purged) + __field(u32, active_evicted) ), TP_fast_assign( - __entry->bytes = bytes; + __entry->nr_to_scan = nr_to_scan; + __entry->purged = purged; + __entry->evicted = evicted; + __entry->active_purged = active_purged; + __entry->active_evicted = active_evicted; ), - TP_printk("Purging %u bytes", __entry->bytes) -); - - -TRACE_EVENT(msm_gem_evict, - TP_PROTO(u32 bytes), - TP_ARGS(bytes), - TP_STRUCT__entry( - __field(u32, bytes) - ), - TP_fast_assign( - __entry->bytes = bytes; - ), - TP_printk("Evicting %u bytes", __entry->bytes) + TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg", + __entry->nr_to_scan, __entry->purged, __entry->evicted, + __entry->active_purged, 
__entry->active_evicted) ); diff --git a/drivers/gpu/drm/msm/msm_io_utils.c b/drivers/gpu/drm/msm/msm_io_utils.c index 7b504617833a..d02cd29ce829 100644 --- a/drivers/gpu/drm/msm/msm_io_utils.c +++ b/drivers/gpu/drm/msm/msm_io_utils.c @@ -5,6 +5,8 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/interconnect.h> + #include "msm_drv.h" /* @@ -124,3 +126,23 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work, work->worker = worker; kthread_init_work(&work->work, fn); } + +struct icc_path *msm_icc_get(struct device *dev, const char *name) +{ + struct device *mdss_dev = dev->parent; + struct icc_path *path; + + path = of_icc_get(dev, name); + if (path) + return path; + + /* + * If there are no interconnects attached to the corresponding device + * node, of_icc_get() will return NULL. + * + * If the MDP5/DPU device node doesn't have interconnects, lookup the + * path in the parent (MDSS) device. + */ + return of_icc_get(mdss_dev, name); + +} diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index a54ed354578b..5577cea7c009 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -21,6 +21,7 @@ struct msm_iommu_pagetable { struct msm_mmu base; struct msm_mmu *parent; struct io_pgtable_ops *pgtbl_ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ phys_addr_t ttbr; u32 asid; }; @@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu) return container_of(mmu, struct msm_iommu_pagetable, base); } +/* based on iommu_pgsize() in iommu.c: */ +static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable, + unsigned long iova, phys_addr_t paddr, + size_t size, size_t *count) +{ + unsigned int pgsize_idx, pgsize_idx_next; + unsigned long pgsizes; + size_t offset, pgsize, pgsize_next; + unsigned long addr_merge = paddr | iova; + + /* Page sizes supported by the hardware and small enough for @size */ + pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0); + + /* Constrain the page sizes further based on the maximum alignment */ + if (likely(addr_merge)) + pgsizes &= GENMASK(__ffs(addr_merge), 0); + + /* Make sure we have at least one suitable page size */ + BUG_ON(!pgsizes); + + /* Pick the biggest page size remaining */ + pgsize_idx = __fls(pgsizes); + pgsize = BIT(pgsize_idx); + if (!count) + return pgsize; + + /* Find the next biggest support page size, if it exists */ + pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); + if (!pgsizes) + goto out_set_count; + + pgsize_idx_next = __ffs(pgsizes); + pgsize_next = BIT(pgsize_idx_next); + + /* + * There's no point trying a bigger page size unless the virtual + * and physical addresses are similarly offset within the larger page. + */ + if ((iova ^ paddr) & (pgsize_next - 1)) + goto out_set_count; + + /* Calculate the offset to the next page size alignment boundary */ + offset = pgsize_next - (addr_merge & (pgsize_next - 1)); + + /* + * If size is big enough to accommodate the larger page, reduce + * the number of smaller pages. 
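calc_pgsize() above is the msm copy of iommu_pgsize(): keep the hardware-supported page sizes no larger than the mapping, constrain them by the combined alignment of iova and paddr, then pick the biggest remaining bit (optionally extending to a second, larger size via *count). A standalone arithmetic sketch of the selection step, in userspace-style C with compiler builtins standing in for the kernel's __fls()/__ffs() and GENMASK() (purely illustrative):

#include <stdio.h>

/* Largest-page selection as in calc_pgsize()/iommu_pgsize(). */
static unsigned long pick_pgsize(unsigned long pgsize_bitmap,
				 unsigned long iova, unsigned long paddr,
				 unsigned long size)
{
	unsigned long addr_merge = paddr | iova;
	/* GENMASK(__fls(size), 0): page sizes that fit within @size */
	unsigned long pgsizes =
		pgsize_bitmap & ((2UL << (63 - __builtin_clzl(size))) - 1);

	/* GENMASK(__ffs(addr_merge), 0): constrain by address alignment */
	if (addr_merge)
		pgsizes &= (2UL << __builtin_ctzl(addr_merge)) - 1;

	/* BIT(__fls(pgsizes)): biggest page size that survived */
	return 1UL << (63 - __builtin_clzl(pgsizes));
}

int main(void)
{
	/* supported sizes: 4K | 2M | 1G */
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* 3MB mapping, both addresses 2M-aligned: picks 2M (0x200000) */
	printf("%#lx\n", pick_pgsize(bitmap, 0x40200000, 0x80200000, 0x300000));
	/* same mapping, only 4K-aligned: falls back to 4K (0x1000) */
	printf("%#lx\n", pick_pgsize(bitmap, 0x40201000, 0x80201000, 0x300000));
	return 0;
}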
+ */ + if (offset + pgsize_next <= size) + size = offset; + +out_set_count: + *count = size >> pgsize_idx; + return pgsize; +} + static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, size_t size) { struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); struct io_pgtable_ops *ops = pagetable->pgtbl_ops; - size_t unmapped = 0; - /* Unmap the block one page at a time */ while (size) { - unmapped += ops->unmap(ops, iova, 4096, NULL); - iova += 4096; - size -= 4096; + size_t unmapped, pgsize, count; + + pgsize = calc_pgsize(pagetable, iova, iova, size, &count); + + unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); + if (!unmapped) + break; + + iova += unmapped; + size -= unmapped; } iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain); - return (unmapped == size) ? 0 : -EINVAL; + return (size == 0) ? 0 : -EINVAL; } static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, @@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); struct io_pgtable_ops *ops = pagetable->pgtbl_ops; struct scatterlist *sg; - size_t mapped = 0; u64 addr = iova; unsigned int i; @@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, size_t size = sg->length; phys_addr_t phys = sg_phys(sg); - /* Map the block one page at a time */ while (size) { - if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) { - msm_iommu_pagetable_unmap(mmu, iova, mapped); + size_t pgsize, count, mapped = 0; + int ret; + + pgsize = calc_pgsize(pagetable, addr, phys, size, &count); + + ret = ops->map_pages(ops, addr, phys, pgsize, count, + prot, GFP_KERNEL, &mapped); + + /* map_pages could fail after mapping some of the pages, + * so update the counters before error handling. + */ + phys += mapped; + addr += mapped; + size -= mapped; + + if (ret) { + msm_iommu_pagetable_unmap(mmu, iova, addr - iova); return -EINVAL; } - - phys += 4096; - addr += 4096; - size -= 4096; - mapped += 4096; } } @@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) /* Needed later for TLB flush */ pagetable->parent = parent; + pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; /* diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index a92ffde53f0b..db2f847c8535 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file) file->private_data = rd; rd->open = true; + /* Reset fifo to clear any previously unread data: */ + rd->fifo.head = rd->fifo.tail = 0; + /* the parsing tools need to know gpu-id to know which * register database to load. * diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 56eecb4a72dc..cad4c3525f0b 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -29,8 +29,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_gem_unlock(obj); } - pm_runtime_get_sync(&gpu->pdev->dev); - /* TODO move submit path over to using a per-ring lock.. 
*/ mutex_lock(&gpu->lock); @@ -38,8 +36,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) mutex_unlock(&gpu->lock); - pm_runtime_put(&gpu->pdev->dev); - return dma_fence_get(submit->hw_fence); } diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index f486a3cd4e55..c6929e205b51 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -200,6 +200,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, *id = queue->id; idr_init(&queue->fence_idr); + mutex_init(&queue->idr_lock); mutex_init(&queue->lock); list_add_tail(&queue->node, &ctx->submitqueues); diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 660c4cbc0b3d..ee92d576d277 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -1276,37 +1276,9 @@ static const uint32_t modeset_formats[] = { }; static const struct drm_plane_funcs nv04_primary_plane_funcs = { - .update_plane = drm_plane_helper_update_primary, - .disable_plane = drm_plane_helper_disable_primary, - .destroy = drm_plane_helper_destroy, + DRM_PLANE_NON_ATOMIC_FUNCS, }; -static struct drm_plane * -create_primary_plane(struct drm_device *dev) -{ - struct drm_plane *primary; - int ret; - - primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (primary == NULL) { - DRM_DEBUG_KMS("Failed to allocate primary plane\n"); - return NULL; - } - - /* possible_crtc's will be filled in later by crtc_init */ - ret = drm_universal_plane_init(dev, primary, 0, - &nv04_primary_plane_funcs, - modeset_formats, - ARRAY_SIZE(modeset_formats), NULL, - DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) { - kfree(primary); - primary = NULL; - } - - return primary; -} - static int nv04_crtc_vblank_handler(struct nvif_notify *notify) { struct nouveau_crtc *nv_crtc = @@ -1321,6 +1293,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) { struct nouveau_display *disp = nouveau_display(dev); struct nouveau_crtc *nv_crtc; + struct drm_plane *primary; int ret; nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); @@ -1335,8 +1308,18 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) nv_crtc->save = nv_crtc_save; nv_crtc->restore = nv_crtc_restore; - drm_crtc_init_with_planes(dev, &nv_crtc->base, - create_primary_plane(dev), NULL, + primary = __drm_universal_plane_alloc(dev, sizeof(*primary), 0, 0, + &nv04_primary_plane_funcs, + modeset_formats, + ARRAY_SIZE(modeset_formats), NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (IS_ERR(primary)) { + ret = PTR_ERR(primary); + kfree(nv_crtc); + return ret; + } + + drm_crtc_init_with_planes(dev, &nv_crtc->base, primary, NULL, &nv04_crtc_funcs, NULL); drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 37e63e98cd08..33f29736024a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -296,9 +296,10 @@ nv10_overlay_init(struct drm_device *device) break; } - ret = drm_plane_init(device, &plane->base, 3 /* both crtc's */, - &nv10_plane_funcs, - formats, num_formats, false); + ret = drm_universal_plane_init(device, &plane->base, 3 /* both crtc's */, + &nv10_plane_funcs, + formats, num_formats, NULL, + DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) goto err; @@ -475,9 +476,9 @@ nv04_overlay_init(struct drm_device *device) if (!plane) return; - 
ret = drm_plane_init(device, &plane->base, 1 /* single crtc */, - &nv04_plane_funcs, - formats, 2, false); + ret = drm_universal_plane_init(device, &plane->base, 1 /* single crtc */, + &nv04_plane_funcs, formats, 2, NULL, + DRM_PLANE_TYPE_OVERLAY, NULL); if (ret) goto err; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 6140db756d06..8cf096f841a9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -386,3 +386,13 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return kmemdup(edid, EDID_LENGTH, GFP_KERNEL); } + +bool nouveau_acpi_video_backlight_use_native(void) +{ + return acpi_video_backlight_use_native(); +} + +void nouveau_acpi_video_register_backlight(void) +{ + acpi_video_register_backlight(); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h index 330f9b837066..e39dd8b94b8b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.h +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h @@ -11,6 +11,8 @@ void nouveau_register_dsm_handler(void); void nouveau_unregister_dsm_handler(void); void nouveau_switcheroo_optimus_dsm(void); void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *); +bool nouveau_acpi_video_backlight_use_native(void); +void nouveau_acpi_video_register_backlight(void); #else static inline bool nouveau_is_optimus(void) { return false; }; static inline bool nouveau_is_v1_dsm(void) { return false; }; @@ -18,6 +20,8 @@ static inline void nouveau_register_dsm_handler(void) {} static inline void nouveau_unregister_dsm_handler(void) {} static inline void nouveau_switcheroo_optimus_dsm(void) {} static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; } +static inline bool nouveau_acpi_video_backlight_use_native(void) { return true; } +static inline void nouveau_acpi_video_register_backlight(void) {} #endif #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index a2141d3d9b1d..a614582779ca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -38,6 +38,7 @@ #include "nouveau_reg.h" #include "nouveau_encoder.h" #include "nouveau_connector.h" +#include "nouveau_acpi.h" static struct ida bl_ida; #define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0' @@ -405,6 +406,11 @@ nouveau_backlight_init(struct drm_connector *connector) goto fail_alloc; } + if (!nouveau_acpi_video_backlight_use_native()) { + NV_INFO(drm, "Skipping nv_backlight registration\n"); + goto fail_alloc; + } + if (!nouveau_get_backlight_name(backlight_name, bl)) { NV_ERROR(drm, "Failed to retrieve a unique name for the backlight interface\n"); goto fail_alloc; @@ -430,6 +436,13 @@ nouveau_backlight_init(struct drm_connector *connector) fail_alloc: kfree(bl); + /* + * If we get here we have an internal panel, but no nv_backlight, + * try registering an ACPI video backlight device instead. + */ + if (ret == 0) + nouveau_acpi_video_register_backlight(); + return ret; } diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index c4de142cc85b..0ee344ebcd1c 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -2451,7 +2451,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, *decim_x = DIV_ROUND_UP(width, in_width_max); - *decim_x = *decim_x > decim_x_min ? 
*decim_x : decim_x_min; + *decim_x = max(*decim_x, decim_x_min); if (*decim_x > *x_predecim) return -EINVAL; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 0399f3390a0a..c4febb861910 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1176,6 +1176,7 @@ static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports) default: break; } + of_node_put(port); } } @@ -1208,11 +1209,13 @@ static int dss_init_ports(struct dss_device *dss) default: break; } + of_node_put(port); } return 0; error: + of_node_put(port); __dss_uninit_ports(dss, i); return r; } diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index ac869acf80ea..61a27dd7392e 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -813,10 +813,8 @@ static int omap_dmm_probe(struct platform_device *dev) } omap_dmm->irq = platform_get_irq(dev, 0); - if (omap_dmm->irq < 0) { - dev_err(&dev->dev, "failed to get IRQ resource\n"); + if (omap_dmm->irq < 0) goto fail; - } omap_dmm->dev = &dev->dev; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 060f4f98bc04..4b546b02d3ae 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -403,17 +403,10 @@ static int panel_edp_unprepare(struct drm_panel *panel) static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p) { - int err; - p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN); - if (IS_ERR(p->hpd_gpio)) { - err = PTR_ERR(p->hpd_gpio); - - if (err != -EPROBE_DEFER) - dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err); - - return err; - } + if (IS_ERR(p->hpd_gpio)) + return dev_err_probe(dev, PTR_ERR(p->hpd_gpio), + "failed to get 'hpd' GPIO\n"); return 0; } @@ -832,12 +825,9 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, panel->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(panel->enable_gpio)) { - err = PTR_ERR(panel->enable_gpio); - if (err != -EPROBE_DEFER) - dev_err(dev, "failed to request GPIO: %d\n", err); - return err; - } + if (IS_ERR(panel->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(panel->enable_gpio), + "failed to request GPIO\n"); err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation); if (err) { @@ -1878,6 +1868,7 @@ static const struct panel_delay delay_200_500_e200 = { static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), + EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), @@ -1885,10 +1876,15 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), 
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"), diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 7da09e34385d..39dc40cf681f 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -576,6 +576,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = ili9341_dbi_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index edd5a0c35437..252fd66011cc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -575,12 +575,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) panel->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(panel->enable_gpio)) { - err = PTR_ERR(panel->enable_gpio); - if (err != -EPROBE_DEFER) - dev_err(dev, "failed to request GPIO: %d\n", err); - return err; - } + if (IS_ERR(panel->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(panel->enable_gpio), + "failed to request GPIO\n"); err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation); if (err) { @@ -2701,6 +2698,36 @@ static const struct panel_desc multi_inno_mi0700s4t_6 = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing multi_inno_mi0800ft_9_timing = { + .pixelclock = { 32000000, 40000000, 50000000 }, + .hactive = { 800, 800, 800 }, + .hfront_porch = { 16, 210, 354 }, + .hback_porch = { 6, 26, 45 }, + .hsync_len = { 1, 20, 40 }, + .vactive = { 600, 600, 600 }, + .vfront_porch = { 1, 12, 77 }, + .vback_porch = { 3, 13, 22 }, + .vsync_len = { 1, 10, 20 }, + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_SYNC_POSEDGE, +}; + +static const struct panel_desc multi_inno_mi0800ft_9 = { + .timings = &multi_inno_mi0800ft_9_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 162, + .height = 122, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | + DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, +}; + static const struct display_timing multi_inno_mi1010ait_1cp_timing = { .pixelclock = { 68900000, 70000000, 73400000 }, .hactive = { 1280, 1280, 1280 }, @@ -4133,6 +4160,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "multi-inno,mi0700s4t-6", .data = &multi_inno_mi0700s4t_6, }, { + .compatible = "multi-inno,mi0800ft-9", + .data = &multi_inno_mi0800ft_9, + }, { .compatible = "multi-inno,mi1010ait-1cp", .data = &multi_inno_mi1010ait_1cp, }, { diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 7b00c955cd82..63aa96a69752 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -53,17 +53,11 @@ void 
qxl_ring_free(struct qxl_ring *ring) kfree(ring); } -void qxl_ring_init_hdr(struct qxl_ring *ring) -{ - ring->ring->header.notify_on_prod = ring->n_elements; -} - struct qxl_ring * qxl_ring_create(struct qxl_ring_header *header, int element_size, int n_elements, int prod_notify, - bool set_prod_notify, wait_queue_head_t *push_event) { struct qxl_ring *ring; @@ -77,8 +71,6 @@ qxl_ring_create(struct qxl_ring_header *header, ring->n_elements = n_elements; ring->prod_notify = prod_notify; ring->push_event = push_event; - if (set_prod_notify) - qxl_ring_init_hdr(ring); spin_lock_init(&ring->lock); return ring; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 47c169673088..432758ad39a3 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -277,10 +277,8 @@ struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header, int element_size, int n_elements, int prod_notify, - bool set_prod_notify, wait_queue_head_t *push_event); void qxl_ring_free(struct qxl_ring *ring); -void qxl_ring_init_hdr(struct qxl_ring *ring); int qxl_check_idle(struct qxl_ring *ring); static inline uint64_t diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 9bf6d4cc98d4..dc3828db1991 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -194,7 +194,6 @@ int qxl_device_init(struct qxl_device *qdev, sizeof(struct qxl_command), QXL_COMMAND_RING_SIZE, qdev->io_base + QXL_IO_NOTIFY_CMD, - false, &qdev->display_event); if (!qdev->command_ring) { DRM_ERROR("Unable to create command ring\n"); @@ -207,7 +206,6 @@ int qxl_device_init(struct qxl_device *qdev, sizeof(struct qxl_command), QXL_CURSOR_RING_SIZE, qdev->io_base + QXL_IO_NOTIFY_CURSOR, - false, &qdev->cursor_event); if (!qdev->cursor_ring) { @@ -219,7 +217,7 @@ int qxl_device_init(struct qxl_device *qdev, qdev->release_ring = qxl_ring_create( &(qdev->ram_header->release_ring_hdr), sizeof(uint64_t), - QXL_RELEASE_RING_SIZE, 0, true, + QXL_RELEASE_RING_SIZE, 0, NULL); if (!qdev->release_ring) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 0eae05dfb385..c841c273222e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -32,6 +32,8 @@ #include <drm/drm_file.h> #include <drm/radeon_drm.h> +#include <acpi/video.h> + #include "atom.h" #include "radeon_atombios.h" #include "radeon.h" @@ -209,6 +211,11 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; + if (!acpi_video_backlight_use_native()) { + drm_info(dev, "Skipping radeon atom DIG backlight registration\n"); + return; + } + pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 8be4799a98ef..638f861af80f 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -34,8 +34,6 @@ #include "r600_reg_safe.h" static int r600_nomm; -extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size); - struct r600_cs_track { /* configuration we mirror so that we use same code btw kms/ums */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 08f83bf2c330..166c18d62f6d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -116,7 +116,6 @@ extern int radeon_use_pflipirq; extern int radeon_bapm; extern int radeon_backlight; extern int radeon_auxch; -extern int radeon_mst; extern int radeon_uvd; extern int radeon_vce; extern int radeon_si_support; @@ -2950,8 +2949,6 @@ struct radeon_hdmi_acr { }; -extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); - extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, u32 tiling_pipe_num, u32 max_rb_num, diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 35c535e48b8d..fbc0a2182318 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -30,6 +30,8 @@ #include <drm/drm_device.h> #include <drm/radeon_drm.h> +#include <acpi/video.h> + #include "radeon.h" #include "radeon_atombios.h" #include "radeon_legacy_encoders.h" @@ -167,7 +169,7 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, return; if (radeon_backlight == 0) { - return; + use_bl = false; } else if (radeon_backlight == 1) { use_bl = true; } else if (radeon_backlight == -1) { @@ -193,6 +195,13 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, else radeon_legacy_backlight_init(radeon_encoder, connector); } + + /* + * If there is no native backlight device (which may happen even when + * use_bl==true) try registering an ACPI video backlight device instead. + */ + if (!rdev->mode_info.bl_encoder) + acpi_video_register_backlight(); } void diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 1a66fb969ee7..0cd32c65456c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -33,6 +33,8 @@ #include <drm/drm_util.h> #include <drm/radeon_drm.h> +#include <acpi/video.h> + #include "radeon.h" #include "radeon_asic.h" #include "radeon_legacy_encoders.h" @@ -387,6 +389,11 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, return; #endif + if (!acpi_video_backlight_use_native()) { + drm_info(dev, "Skipping radeon legacy LVDS backlight registration\n"); + return; + } + pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 6a6a73204226..9f5be416454f 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -874,7 +874,6 @@ extern struct radeon_encoder_tv_dac * radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_lvds * radeon_combios_get_lvds_info(struct radeon_encoder *encoder); -extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder); extern struct radeon_encoder_tv_dac * radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_primary_dac * diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index e7275b5e7ec8..6f132325c8b7 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -14,10 +14,3 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o - -# 'remote-endpoint' is fixed up at run-time -DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint 
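The radeon backlight hunks above implement the same two-step handoff as the nouveau change earlier in this section: register the GPU's native backlight only when the ACPI video code agrees, and fall back to an ACPI video backlight when no native device came up. A minimal sketch of that pattern, where struct example_device, its native_backlight field and example_register_native_backlight() are illustrative stand-ins rather than radeon code; only the two acpi_video_*() helpers are the real <acpi/video.h> API:

#include <linux/backlight.h>
#include <acpi/video.h>
#include <drm/drm_print.h>

/* Illustrative driver state; stands in for radeon_device / nouveau_drm. */
struct example_device {
	struct drm_device *drm;
	struct backlight_device *native_backlight;
};

static void example_register_native_backlight(struct example_device *edev)
{
	/* Stub: a real driver would create its backlight_device here. */
	edev->native_backlight = NULL;
}

static void example_add_backlight(struct example_device *edev)
{
	/*
	 * Step 1: only register the GPU's own backlight when the ACPI
	 * video detect code says native control should be used here.
	 */
	if (acpi_video_backlight_use_native())
		example_register_native_backlight(edev);
	else
		drm_info(edev->drm, "Skipping native backlight registration\n");

	/*
	 * Step 2: if no native device ended up registered (skipped above,
	 * or its probe failed), ask the ACPI video driver to create its
	 * fallback backlight now that the GPU driver is done probing.
	 */
	if (!edev->native_backlight)
		acpi_video_register_backlight();
}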
-DTC_FLAGS_rcar_du_of_lvds_r8a7791 += -Wno-graph_endpoint -DTC_FLAGS_rcar_du_of_lvds_r8a7793 += -Wno-graph_endpoint -DTC_FLAGS_rcar_du_of_lvds_r8a7795 += -Wno-graph_endpoint -DTC_FLAGS_rcar_du_of_lvds_r8a7796 += -Wno-graph_endpoint diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index fd3b94649a01..3619e1ddeb62 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -29,6 +29,7 @@ #include "rcar_du_regs.h" #include "rcar_du_vsp.h" #include "rcar_lvds.h" +#include "rcar_mipi_dsi.h" static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) { @@ -744,7 +745,19 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - rcar_lvds_clk_enable(bridge, mode->clock * 1000); + rcar_lvds_pclk_enable(bridge, mode->clock * 1000); + } + + /* + * Similarly to LVDS, on V3U the dot clock is provided by the DSI + * encoder, and we need to enable the DSI clocks before enabling the CRTC. + */ + if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) && + (rstate->outputs & + (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) { + struct drm_bridge *bridge = rcdu->dsi[rcrtc->index]; + + rcar_mipi_dsi_pclk_enable(bridge, state); } rcar_du_crtc_start(rcrtc); @@ -777,7 +790,20 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, * Disable the LVDS clock output, see * rcar_du_crtc_atomic_enable(). */ - rcar_lvds_clk_disable(bridge); + rcar_lvds_pclk_disable(bridge); + } + + if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) && + (rstate->outputs & + (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) { + struct drm_bridge *bridge = rcdu->dsi[rcrtc->index]; + + /* + * Disable the DSI clock output, see + * rcar_du_crtc_atomic_enable(). + */ + + rcar_mipi_dsi_pclk_disable(bridge); } spin_lock_irq(&crtc->dev->event_lock); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 00ac233a115e..a2776f1d6f2c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -27,7 +27,6 @@ #include "rcar_du_drv.h" #include "rcar_du_kms.h" -#include "rcar_du_regs.h" /* ----------------------------------------------------------------------------- * Device Information @@ -507,7 +506,8 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = { static const struct rcar_du_device_info rcar_du_r8a779a0_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ - | RCAR_DU_FEATURE_VSP1_SOURCE, + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_NO_BLENDING, .channels_mask = BIT(1) | BIT(0), .routes = { /* R8A779A0 has two MIPI DSI outputs. 
*/ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index bfad7775d9a1..5cfa2bb7ad93 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -31,6 +31,7 @@ struct rcar_du_device; #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ #define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ #define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */ +#define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ @@ -91,6 +92,7 @@ struct rcar_du_device_info { #define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2) #define RCAR_DU_MAX_VSPS 4 #define RCAR_DU_MAX_LVDS 2 +#define RCAR_DU_MAX_DSI 2 struct rcar_du_device { struct device *dev; @@ -107,6 +109,7 @@ struct rcar_du_device { struct platform_device *cmms[RCAR_DU_MAX_CRTCS]; struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS]; struct drm_bridge *lvds[RCAR_DU_MAX_LVDS]; + struct drm_bridge *dsi[RCAR_DU_MAX_DSI]; struct { struct drm_property *colorkey; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 60d6be78323b..b1787be31e92 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -9,18 +9,13 @@ #include <linux/export.h> #include <linux/of.h> -#include <linux/slab.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> -#include <drm/drm_crtc.h> -#include <drm/drm_managed.h> -#include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_panel.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" -#include "rcar_du_kms.h" #include "rcar_lvds.h" /* ----------------------------------------------------------------------------- @@ -84,6 +79,10 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, if (output == RCAR_DU_OUTPUT_LVDS0 || output == RCAR_DU_OUTPUT_LVDS1) rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge; + + if (output == RCAR_DU_OUTPUT_DSI0 || + output == RCAR_DU_OUTPUT_DSI1) + rcdu->dsi[output - RCAR_DU_OUTPUT_DSI0] = bridge; } /* diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 21881fb5e84a..8c2719efda2a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -405,8 +405,8 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, format = rcar_du_format_info(mode_cmd->pixel_format); if (format == NULL) { - dev_dbg(dev->dev, "unsupported pixel format %08x\n", - mode_cmd->pixel_format); + dev_dbg(dev->dev, "unsupported pixel format %p4cc\n", + &mode_cmd->pixel_format); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 9e1f0cbbf642..d759e0192181 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -506,8 +506,15 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp, unsigned int index, const struct rcar_du_plane_state *state) { - rcar_du_plane_write(rgrp, index, PnMR, - PnMR_SPIM_TP_OFF | state->format->pnmr); + struct rcar_du_device *rcdu = rgrp->dev; + u32 pnmr = state->format->pnmr | PnMR_SPIM_TP_OFF; + + if (rcdu->info->features & RCAR_DU_FEATURE_NO_BLENDING) { + /* No blending. ALP and EOR are not supported. 
*/ + pnmr &= ~(PnMR_SPIM_ALP | PnMR_SPIM_EOR); + } + + rcar_du_plane_write(rgrp, index, PnMR, pnmr); rcar_du_plane_write(rgrp, index, PnDDCR4, state->format->edf | PnDDCR4_CODE); @@ -521,7 +528,6 @@ static void rcar_du_plane_setup_format_gen3(struct rcar_du_group *rgrp, * register to 0 to avoid this. */ - /* TODO: Check if alpha-blending should be disabled in PnMR. */ rcar_du_plane_write(rgrp, index, PnALPHAR, 0); } @@ -619,8 +625,8 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane, *format = rcar_du_format_info(state->fb->format->format); if (*format == NULL) { - dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__, - state->fb->format->format); + dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__, + &state->fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 10b7f1d0877a..e465aef41585 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -152,6 +152,7 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) .alpha = state->state.alpha >> 8, .zpos = state->state.zpos, }; + u32 fourcc = state->format->fourcc; unsigned int i; cfg.src.left = state->state.src.x1 >> 16; @@ -168,9 +169,27 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl) + fb->offsets[i]; - format = rcar_du_format_info(state->format->fourcc); + if (state->state.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) { + switch (fourcc) { + case DRM_FORMAT_ARGB1555: + fourcc = DRM_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_ARGB4444: + fourcc = DRM_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB8888: + fourcc = DRM_FORMAT_XRGB8888; + break; + } + } + + format = rcar_du_format_info(fourcc); cfg.pixelformat = format->v4l2; + cfg.premult = state->state.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI; + vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, plane->index, &cfg); } @@ -436,6 +455,11 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, drm_plane_create_zpos_property(&plane->plane, i, 0, num_planes - 1); + drm_plane_create_blend_mode_property(&plane->plane, + BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE)); + vsp->num_planes++; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c index 25f50a297c11..8cd37d7b8ae2 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c @@ -166,8 +166,8 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder, wb_state->format = rcar_du_format_info(fb->format->format); if (wb_state->format == NULL) { - dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__, - fb->format->format); + dev_dbg(dev->dev, "%s: unsupported format %p4cc\n", __func__, + &fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index d85aa4bc7f84..81a060c2fe3f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -306,7 +306,7 @@ static void rcar_lvds_pll_setup_d3_e3(struct rcar_lvds *lvds, unsigned int freq) * Clock - D3/E3 only */ -int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq) +int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); int ret; @@ -324,9 +324,9 @@ int 
rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq) return 0; } -EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable); +EXPORT_SYMBOL_GPL(rcar_lvds_pclk_enable); -void rcar_lvds_clk_disable(struct drm_bridge *bridge) +void rcar_lvds_pclk_disable(struct drm_bridge *bridge) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -339,7 +339,7 @@ void rcar_lvds_clk_disable(struct drm_bridge *bridge) clk_disable_unprepare(lvds->clocks.mod); } -EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable); +EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable); /* ----------------------------------------------------------------------------- * Bridge diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c index 62f7eb84ab01..a7f2b7f66a17 100644 --- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c @@ -25,6 +25,7 @@ #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> +#include "rcar_mipi_dsi.h" #include "rcar_mipi_dsi_regs.h" struct rcar_mipi_dsi { @@ -414,7 +415,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, /* Enable DOT clock */ vclkset = VCLKSET_CKEN; - rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + rcar_mipi_dsi_write(dsi, VCLKSET, vclkset); if (dsi_format == 24) vclkset |= VCLKSET_BPP_24; @@ -429,7 +430,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div) | VCLKSET_LANE(dsi->lanes - 1); - rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + rcar_mipi_dsi_write(dsi, VCLKSET, vclkset); /* After setting VCLKSET register, enable VCLKEN */ rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN); @@ -441,9 +442,21 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi) { + /* Disable VCLKEN */ + rcar_mipi_dsi_write(dsi, VCLKEN, 0); + + /* Disable DOT clock */ + rcar_mipi_dsi_write(dsi, VCLKSET, 0); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + /* CFGCLK disable */ + rcar_mipi_dsi_clr(dsi, CFGCLKSET, CFGCLKSET_CKEN); + + /* LPCLK disable */ + rcar_mipi_dsi_clr(dsi, LPCLKSET, LPCLKSET_CKEN); + dev_dbg(dsi->dev, "DSI device is shut down\n"); } @@ -542,6 +555,34 @@ static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi) return 0; } +static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi) +{ + u32 status; + int ret; + + /* Disable transmission in video mode. 
*/ + rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_EN_VIDEO); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & TXVMSR_ACT), + 2000, 100000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to disable video transmission\n"); + return; + } + + /* Assert video FIFO clear. */ + rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_VFCLR); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & TXVMSR_VFRDY), + 2000, 100000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to assert video FIFO clear\n"); + return; + } +} + /* ----------------------------------------------------------------------------- * Bridge */ @@ -558,7 +599,22 @@ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { - struct drm_atomic_state *state = old_bridge_state->base.state; + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + rcar_mipi_dsi_start_video(dsi); +} + +static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + rcar_mipi_dsi_stop_video(dsi); +} + +void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); const struct drm_display_mode *mode; struct drm_connector *connector; @@ -586,8 +642,6 @@ static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge, if (ret < 0) goto err_dsi_start_hs; - rcar_mipi_dsi_start_video(dsi); - return; err_dsi_start_hs: @@ -595,15 +649,16 @@ err_dsi_start_hs: err_dsi_startup: rcar_mipi_dsi_clk_disable(dsi); } +EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_enable); -static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) +void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge) { struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); rcar_mipi_dsi_shutdown(dsi); rcar_mipi_dsi_clk_disable(dsi); } +EXPORT_SYMBOL_GPL(rcar_mipi_dsi_pclk_disable); static enum drm_mode_status rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h new file mode 100644 index 000000000000..528a196e6edd --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * R-Car DSI Encoder + * + * Copyright (C) 2022 Renesas Electronics Corporation + * + * Contact: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com> + */ + +#ifndef __RCAR_MIPI_DSI_H__ +#define __RCAR_MIPI_DSI_H__ + +struct drm_atomic_state; +struct drm_bridge; + +#if IS_ENABLED(CONFIG_DRM_RCAR_MIPI_DSI) +void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state); +void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge); +#else +static inline void rcar_mipi_dsi_pclk_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ +} + +static inline void rcar_mipi_dsi_pclk_disable(struct drm_bridge *bridge) +{ +} +#endif /* CONFIG_DRM_RCAR_MIPI_DSI */ + +#endif /* __RCAR_MIPI_DSI_H__ */ diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index 110e83aad9bb..bf6948125b84 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -179,6 +179,23 @@ #define RK3399_TXRX_SRC_SEL_ISP0 BIT(4) 
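The new rcar_mipi_dsi.h above follows the kernel's usual conditional-stub header pattern: when the DSI driver is enabled, the CRTC code links against the real exported symbols; otherwise it compiles against empty inlines, keeping #ifdefs out of the callers. A generic sketch of the shape, where CONFIG_FOO and the foo_* names are placeholders, not kernel APIs:

/* foo.h — sketch of the IS_ENABLED() stub pattern used by rcar_mipi_dsi.h */
#ifndef __FOO_H__
#define __FOO_H__

struct foo_device;

#if IS_ENABLED(CONFIG_FOO)
/* Provided (and exported) by the driver when it is built in or as a module. */
void foo_pclk_enable(struct foo_device *fdev);
void foo_pclk_disable(struct foo_device *fdev);
#else
/* No-op stubs: callers still compile and link when the driver is disabled. */
static inline void foo_pclk_enable(struct foo_device *fdev) { }
static inline void foo_pclk_disable(struct foo_device *fdev) { }
#endif

#endif /* __FOO_H__ */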
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0) +#define RK3568_GRF_VO_CON2 0x0368 +#define RK3568_DSI0_SKEWCALHS (0x1f << 11) +#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4) +#define RK3568_DSI0_TURNDISABLE BIT(2) +#define RK3568_DSI0_FORCERXMODE BIT(0) + +/* + * Note these registers do not appear in the datasheet; they are, + * however, present in the BSP driver, which is where these values + * come from. The name GRF_VO_CON3 is assumed. + */ +#define RK3568_GRF_VO_CON3 0x36c +#define RK3568_DSI1_SKEWCALHS (0x1f << 11) +#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4) +#define RK3568_DSI1_TURNDISABLE BIT(2) +#define RK3568_DSI1_FORCERXMODE BIT(0) + #define HIWORD_UPDATE(val, mask) (val | (mask) << 16) enum { @@ -735,8 +752,9 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi) static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi, int mux) { - regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg, - mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big); + if (dsi->cdata->lcdsel_grf_reg >= 0) + regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg, + mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big); } static int @@ -963,6 +981,8 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, DRM_DEV_ERROR(dev, "Failed to create drm encoder\n"); goto out_pll_clk; } + rockchip_drm_encoder_set_crtc_endpoint_id(&dsi->encoder, + dev->of_node, 0, 0); ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder.encoder); if (ret) { @@ -1612,6 +1632,30 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = { { /* sentinel */ } }; +static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = { + { + .reg = 0xfe060000, + .lcdsel_grf_reg = -1, + .lanecfg1_grf_reg = RK3568_GRF_VO_CON2, + .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS | + RK3568_DSI0_FORCETXSTOPMODE | + RK3568_DSI0_TURNDISABLE | + RK3568_DSI0_FORCERXMODE), + .max_data_lanes = 4, + }, + { + .reg = 0xfe070000, + .lcdsel_grf_reg = -1, + .lanecfg1_grf_reg = RK3568_GRF_VO_CON3, + .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS | + RK3568_DSI1_FORCETXSTOPMODE | + RK3568_DSI1_TURNDISABLE | + RK3568_DSI1_FORCERXMODE), + .max_data_lanes = 4, + }, + { /* sentinel */ } +}; + static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = { { .compatible = "rockchip,px30-mipi-dsi", @@ -1622,6 +1666,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = { }, { .compatible = "rockchip,rk3399-mipi-dsi", .data = &rk3399_chip_data, + }, { + .compatible = "rockchip,rk3568-mipi-dsi", + .data = &rk3568_chip_data, }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index bdd48f87d098..c356de5dd220 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -9,6 +9,7 @@ #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/kernel.h> +#include <linux/log2.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> @@ -67,6 +68,9 @@ #define VOP_REG_SET(vop, group, name, v) \ vop_reg_set(vop, &vop->data->group->name, 0, ~0, v, #name) +#define VOP_HAS_REG(vop, group, name) \ + (!!(vop->data->group->name.mask)) + #define VOP_INTR_SET_TYPE(vop, name, type, v) \ do { \ int i, reg = 0, mask = 0; \ @@ -184,12 +188,6 @@ struct vop { struct vop_win win[]; }; -static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) -{ - writel(v, vop->regs + offset); - vop->regsbak[offset >> 2] = v; -} - static inline uint32_t 
vop_readl(struct vop *vop, uint32_t offset) { return readl(vop->regs + offset); @@ -1188,7 +1186,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, * * Key points: * * - DRM works in in kHz. + * - DRM works in kHz. * - Clock framework works in Hz. * - Rockchip's clock driver picks the clock rate that is the * same _OR LOWER_ than the one requested. @@ -1223,17 +1221,22 @@ static bool vop_dsp_lut_is_enabled(struct vop *vop) return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en); } +static u32 vop_lut_buffer_index(struct vop *vop) +{ + return vop_read_reg(vop, 0, &vop->data->common->lut_buffer_index); +} + static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc) { struct drm_color_lut *lut = crtc->state->gamma_lut->data; - unsigned int i; + unsigned int i, bpc = ilog2(vop->data->lut_size); for (i = 0; i < crtc->gamma_size; i++) { u32 word; - word = (drm_color_lut_extract(lut[i].red, 10) << 20) | - (drm_color_lut_extract(lut[i].green, 10) << 10) | - drm_color_lut_extract(lut[i].blue, 10); + word = (drm_color_lut_extract(lut[i].red, bpc) << (2 * bpc)) | - (drm_color_lut_extract(lut[i].green, bpc) << bpc) | + (drm_color_lut_extract(lut[i].green, bpc) << bpc) | + drm_color_lut_extract(lut[i].blue, bpc); writel(word, vop->lut_regs + i * 4); } } @@ -1243,38 +1246,66 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, { struct drm_crtc_state *state = crtc->state; unsigned int idle; + u32 lut_idx, old_idx; int ret; if (!vop->lut_regs) return; - /* - * To disable gamma (gamma_lut is null) or to write - * an update to the LUT, clear dsp_lut_en. - */ - spin_lock(&vop->reg_lock); - VOP_REG_SET(vop, common, dsp_lut_en, 0); - vop_cfg_done(vop); - spin_unlock(&vop->reg_lock); - /* - * In order to write the LUT to the internal memory, - * we need to first make sure the dsp_lut_en bit is cleared. - */ - ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop, - idle, !idle, 5, 30 * 1000); - if (ret) { - DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n"); - return; - } + if (!state->gamma_lut || !VOP_HAS_REG(vop, common, update_gamma_lut)) { + /* + * To disable gamma (gamma_lut is null) or to write + * an update to the LUT, clear dsp_lut_en. + */ + spin_lock(&vop->reg_lock); + VOP_REG_SET(vop, common, dsp_lut_en, 0); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); - if (!state->gamma_lut) - return; + /* + * In order to write the LUT to the internal memory, + * we need to first make sure the dsp_lut_en bit is cleared. + */ + ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop, + idle, !idle, 5, 30 * 1000); + if (ret) { + DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n"); + return; + } + + if (!state->gamma_lut) + return; + } else { + /* + * On RK3399 the gamma LUT can be updated without clearing dsp_lut_en, + * by setting update_gamma_lut then waiting for lut_buffer_index to change + */ + old_idx = vop_lut_buffer_index(vop); + } spin_lock(&vop->reg_lock); vop_crtc_write_gamma_lut(vop, crtc); VOP_REG_SET(vop, common, dsp_lut_en, 1); + VOP_REG_SET(vop, common, update_gamma_lut, 1); vop_cfg_done(vop); spin_unlock(&vop->reg_lock); + + if (VOP_HAS_REG(vop, common, update_gamma_lut)) { + ret = readx_poll_timeout(vop_lut_buffer_index, vop, + lut_idx, lut_idx != old_idx, 5, 30 * 1000); + if (ret) { + DRM_DEV_ERROR(vop->dev, "gamma LUT update timeout!\n"); + return; + } + + /* + * update_gamma_lut is auto cleared by HW, but write 0 to clear the bit + * in our backup of the regs. 
+ */ + spin_lock(&vop->reg_lock); + VOP_REG_SET(vop, common, update_gamma_lut, 0); + spin_unlock(&vop->reg_lock); + } } static void vop_crtc_atomic_begin(struct drm_crtc *crtc, @@ -1324,14 +1355,6 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, return; } - /* - * If we have a GAMMA LUT in the state, then let's make sure - * it's updated. We might be coming out of suspend, - * which means the LUT internal memory needs to be re-written. - */ - if (crtc->state->gamma_lut) - vop_crtc_gamma_set(vop, crtc, old_state); - mutex_lock(&vop->vop_lock); WARN_ON(vop->event); @@ -1422,6 +1445,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, VOP_REG_SET(vop, common, standby, 0); mutex_unlock(&vop->vop_lock); + + /* + * If we have a GAMMA LUT in the state, then let's make sure + * it's updated. We might be coming out of suspend, + * which means the LUT internal memory needs to be re-written. + */ + if (crtc->state->gamma_lut) + vop_crtc_gamma_set(vop, crtc, old_state); } static bool vop_fs_irq_is_pending(struct vop *vop) @@ -2147,8 +2178,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { - if (!vop_data->lut_size) { - DRM_DEV_ERROR(dev, "no gamma LUT size defined\n"); + if (vop_data->lut_size != 1024 && vop_data->lut_size != 256) { + DRM_DEV_ERROR(dev, "unsupported gamma LUT size %d\n", vop_data->lut_size); return -EINVAL; } vop->lut_regs = devm_ioremap_resource(dev, res); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index ba88addc1a75..8502849833d9 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -113,6 +113,8 @@ struct vop_common { struct vop_reg dither_down_en; struct vop_reg dither_up; struct vop_reg dsp_lut_en; + struct vop_reg update_gamma_lut; + struct vop_reg lut_buffer_index; struct vop_reg gate_en; struct vop_reg mmu_en; struct vop_reg out_mode; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index d03dd0402923..014f99e8928e 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -875,6 +875,24 @@ static const struct vop_output rk3399_output = { .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3), }; +static const struct vop_common rk3399_common = { + .standby = VOP_REG_SYNC(RK3399_SYS_CTRL, 0x1, 22), + .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), + .mmu_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 20), + .dither_down_sel = VOP_REG(RK3399_DSP_CTRL1, 0x1, 4), + .dither_down_mode = VOP_REG(RK3399_DSP_CTRL1, 0x1, 3), + .dither_down_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 2), + .pre_dither_down = VOP_REG(RK3399_DSP_CTRL1, 0x1, 1), + .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6), + .dsp_lut_en = VOP_REG(RK3399_DSP_CTRL1, 0x1, 0), + .update_gamma_lut = VOP_REG(RK3399_DSP_CTRL1, 0x1, 7), + .lut_buffer_index = VOP_REG(RK3399_DBG_POST_REG1, 0x1, 1), + .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), + .dsp_blank = VOP_REG(RK3399_DSP_CTRL0, 0x3, 18), + .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), + .cfg_done = VOP_REG_SYNC(RK3399_REG_CFG_DONE, 0x1, 0), +}; + static const struct vop_yuv2yuv_phy rk3399_yuv2yuv_win01_data = { .y2r_coefficients = { VOP_REG(RK3399_WIN0_YUV2YUV_Y2R + 0, 0xffff, 0), @@ -957,7 +975,7 @@ static const struct vop_data rk3399_vop_big = { .version = VOP_VERSION(3, 5), .feature = VOP_FEATURE_OUTPUT_RGB10, .intr = &rk3366_vop_intr, - .common = 
&rk3288_common, + .common = &rk3399_common, .modeset = &rk3288_modeset, .output = &rk3399_output, .afbc = &rk3399_vop_afbc, @@ -965,6 +983,7 @@ static const struct vop_data rk3399_vop_big = { .win = rk3399_vop_win_data, .win_size = ARRAY_SIZE(rk3399_vop_win_data), .win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data, + .lut_size = 1024, }; static const struct vop_win_data rk3399_vop_lit_win_data[] = { @@ -983,13 +1002,14 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_lit_win_yuv2yuv_data[] = { static const struct vop_data rk3399_vop_lit = { .version = VOP_VERSION(3, 6), .intr = &rk3366_vop_intr, - .common = &rk3288_common, + .common = &rk3399_common, .modeset = &rk3288_modeset, .output = &rk3399_output, .misc = &rk3368_misc, .win = rk3399_vop_lit_win_data, .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data), .win_yuv2yuv = rk3399_vop_lit_win_yuv2yuv_data, + .lut_size = 256, }; static const struct vop_win_data rk3228_vop_win_data[] = { diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h index 0b3cd65ba5c1..406e981c75bd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h @@ -628,6 +628,7 @@ #define RK3399_YUV2YUV_WIN 0x02c0 #define RK3399_YUV2YUV_POST 0x02c4 #define RK3399_AUTO_GATING_EN 0x02cc +#define RK3399_DBG_POST_REG1 0x036c #define RK3399_WIN0_CSC_COE 0x03a0 #define RK3399_WIN1_CSC_COE 0x03c0 #define RK3399_WIN2_CSC_COE 0x03e0 diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index e5a4ecde0063..4f2395d1a791 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -829,7 +829,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); - if (job && dma_fence_is_signaled(&job->s_fence->finished)) { + if (job && dma_fence_is_signaled(job->s_fence->parent)) { /* remove job from pending_list */ list_del_init(&job->list); @@ -841,7 +841,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) if (next) { next->s_fence->scheduled.timestamp = - job->s_fence->finished.timestamp; + job->s_fence->parent->timestamp; /* start TO timer for next job */ drm_sched_start_timeout(sched); } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c index 54228424793a..6c5f0cbe7d95 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c @@ -252,9 +252,10 @@ int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index) splane->index = index; splane->alpha = 255; - ret = drm_plane_init(sdev->ddev, &splane->plane, 1, - &shmob_drm_plane_funcs, formats, - ARRAY_SIZE(formats), false); + ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1, + &shmob_drm_plane_funcs, + formats, ARRAY_SIZE(formats), NULL, + DRM_PLANE_TYPE_OVERLAY, NULL); return ret; } diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index c95221fff474..bc41a5ae810a 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -555,37 +555,28 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb, const struct iosys_m if (!buf) return -ENOMEM; + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) + goto out_free; + iosys_map_set_vaddr(&dst, buf); drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect); + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); + 
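For context on the ssd130x hunk above: a CPU read of a GEM framebuffer must be bracketed by drm_gem_fb_begin_cpu_access()/drm_gem_fb_end_cpu_access() so a DMA-BUF exporter can flush and invalidate caches around the access. A self-contained sketch of the same bracketing, where example_blit() and its fixed destination pitch are illustrative rather than the driver's code:

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>

static int example_blit(struct drm_framebuffer *fb, const struct iosys_map *vmap,
			const struct drm_rect *rect, u8 *buf, unsigned int dst_pitch)
{
	struct iosys_map dst;
	int ret;

	/* Let the exporter prepare the buffer for a CPU read. */
	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	iosys_map_set_vaddr(&dst, buf);
	drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect);

	/* Always pair begin with end, even on the success path. */
	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

	return 0;
}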
ssd130x_update_rect(ssd130x, buf, rect); +out_free: kfree(buf); return ret; } -static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *new_state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane); - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_crtc); - - return drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); -} - static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *plane_state = plane->state; - struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane); + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_device *drm = plane->dev; struct drm_rect src_clip, dst_clip; @@ -607,7 +598,7 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane, } static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane, - struct drm_atomic_state *old_state) + struct drm_atomic_state *state) { struct drm_device *drm = plane->dev; struct ssd130x_device *ssd130x = drm_to_ssd130x(drm); @@ -623,7 +614,7 @@ static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane, static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = ssd130x_primary_plane_helper_atomic_check, + .atomic_check = drm_plane_helper_atomic_check, .atomic_update = ssd130x_primary_plane_helper_atomic_update, .atomic_disable = ssd130x_primary_plane_helper_atomic_disable, }; diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c index d76f83833e75..7a2b2d6bc3fe 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/drm/tests/drm_buddy_test.c @@ -13,7 +13,7 @@ #include "../lib/drm_random.h" -#define IGT_TIMEOUT(name__) \ +#define TIMEOUT(name__) \ unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT static unsigned int random_seed; @@ -24,7 +24,7 @@ static inline u64 get_size(int order, u64 chunk_size) } __printf(2, 3) -static bool __igt_timeout(unsigned long timeout, const char *fmt, ...) +static bool __timeout(unsigned long timeout, const char *fmt, ...) { va_list va; @@ -43,8 +43,8 @@ static bool __igt_timeout(unsigned long timeout, const char *fmt, ...) 
return true; } -static void __igt_dump_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block, bool buddy) +static void __dump_block(struct kunit *test, struct drm_buddy *mm, + struct drm_buddy_block *block, bool buddy) { kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n", block->header, drm_buddy_block_state(block), @@ -52,20 +52,20 @@ static void __igt_dump_block(struct kunit *test, struct drm_buddy *mm, drm_buddy_block_size(mm, block), !block->parent, buddy); } -static void igt_dump_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block) +static void dump_block(struct kunit *test, struct drm_buddy *mm, + struct drm_buddy_block *block) { struct drm_buddy_block *buddy; - __igt_dump_block(test, mm, block, false); + __dump_block(test, mm, block, false); buddy = drm_get_buddy(block); if (buddy) - __igt_dump_block(test, mm, buddy, true); + __dump_block(test, mm, buddy, true); } -static int igt_check_block(struct kunit *test, struct drm_buddy *mm, - struct drm_buddy_block *block) +static int check_block(struct kunit *test, struct drm_buddy *mm, + struct drm_buddy_block *block) { struct drm_buddy_block *buddy; unsigned int block_state; @@ -137,8 +137,8 @@ static int igt_check_block(struct kunit *test, struct drm_buddy *mm, return err; } -static int igt_check_blocks(struct kunit *test, struct drm_buddy *mm, - struct list_head *blocks, u64 expected_size, bool is_contiguous) +static int check_blocks(struct kunit *test, struct drm_buddy *mm, + struct list_head *blocks, u64 expected_size, bool is_contiguous) { struct drm_buddy_block *block; struct drm_buddy_block *prev; @@ -150,7 +150,7 @@ static int igt_check_blocks(struct kunit *test, struct drm_buddy *mm, total = 0; list_for_each_entry(block, blocks, link) { - err = igt_check_block(test, mm, block); + err = check_block(test, mm, block); if (!drm_buddy_block_is_allocated(block)) { kunit_err(test, "block not allocated\n"); @@ -190,16 +190,16 @@ static int igt_check_blocks(struct kunit *test, struct drm_buddy *mm, if (prev) { kunit_err(test, "prev block, dump:\n"); - igt_dump_block(test, mm, prev); + dump_block(test, mm, prev); } kunit_err(test, "bad block, dump:\n"); - igt_dump_block(test, mm, block); + dump_block(test, mm, block); return err; } -static int igt_check_mm(struct kunit *test, struct drm_buddy *mm) +static int check_mm(struct kunit *test, struct drm_buddy *mm) { struct drm_buddy_block *root; struct drm_buddy_block *prev; @@ -233,7 +233,7 @@ static int igt_check_mm(struct kunit *test, struct drm_buddy *mm) break; } - err = igt_check_block(test, mm, root); + err = check_block(test, mm, root); if (!drm_buddy_block_is_free(root)) { kunit_err(test, "root not free\n"); @@ -289,18 +289,18 @@ static int igt_check_mm(struct kunit *test, struct drm_buddy *mm) if (prev) { kunit_err(test, "prev root(%u), dump:\n", i - 1); - igt_dump_block(test, mm, prev); + dump_block(test, mm, prev); } if (root) { kunit_err(test, "bad root(%u), dump:\n", i); - igt_dump_block(test, mm, root); + dump_block(test, mm, root); } return err; } -static void igt_mm_config(u64 *size, u64 *chunk_size) +static void mm_config(u64 *size, u64 *chunk_size) { DRM_RND_STATE(prng, random_seed); u32 s, ms; @@ -321,7 +321,7 @@ static void igt_mm_config(u64 *size, u64 *chunk_size) *size = (u64)s << 12; } -static void igt_buddy_alloc_pathological(struct kunit *test) +static void drm_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start = 0; struct 
drm_buddy_block *block; @@ -402,7 +402,7 @@ static void igt_buddy_alloc_pathological(struct kunit *test) drm_buddy_fini(&mm); } -static void igt_buddy_alloc_smoke(struct kunit *test) +static void drm_test_buddy_alloc_smoke(struct kunit *test) { u64 mm_size, chunk_size, start = 0; unsigned long flags = 0; @@ -411,9 +411,9 @@ static void igt_buddy_alloc_smoke(struct kunit *test) int i; DRM_RND_STATE(prng, random_seed); - IGT_TIMEOUT(end_time); + TIMEOUT(end_time); - igt_mm_config(&mm_size, &chunk_size); + mm_config(&mm_size, &chunk_size); KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size), "buddy_init failed\n"); @@ -430,7 +430,7 @@ static void igt_buddy_alloc_smoke(struct kunit *test) LIST_HEAD(tmp); int order, err; - KUNIT_ASSERT_FALSE_MSG(test, igt_check_mm(test, &mm), + KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm), "pre-mm check failed, abort\n"); order = max_order; @@ -466,19 +466,19 @@ retry: total += drm_buddy_block_size(&mm, block); - if (__igt_timeout(end_time, NULL)) { + if (__timeout(end_time, NULL)) { timeout = true; break; } } while (total < mm.size); if (!err) - err = igt_check_blocks(test, &mm, &blocks, total, false); + err = check_blocks(test, &mm, &blocks, total, false); drm_buddy_free_list(&mm, &blocks); if (!err) { - KUNIT_EXPECT_FALSE_MSG(test, igt_check_mm(test, &mm), + KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n"); } @@ -492,7 +492,7 @@ retry: drm_buddy_fini(&mm); } -static void igt_buddy_alloc_pessimistic(struct kunit *test) +static void drm_test_buddy_alloc_pessimistic(struct kunit *test) { u64 mm_size, size, start = 0; struct drm_buddy_block *block, *bn; @@ -587,7 +587,7 @@ static void igt_buddy_alloc_pessimistic(struct kunit *test) drm_buddy_fini(&mm); } -static void igt_buddy_alloc_optimistic(struct kunit *test) +static void drm_test_buddy_alloc_optimistic(struct kunit *test) { u64 mm_size, size, start = 0; struct drm_buddy_block *block; @@ -633,7 +633,7 @@ static void igt_buddy_alloc_optimistic(struct kunit *test) drm_buddy_fini(&mm); } -static void igt_buddy_alloc_range(struct kunit *test) +static void drm_test_buddy_alloc_range(struct kunit *test) { unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION; u64 offset, size, rem, chunk_size, end; @@ -641,12 +641,12 @@ static void igt_buddy_alloc_range(struct kunit *test) struct drm_buddy mm; LIST_HEAD(blocks); - igt_mm_config(&size, &chunk_size); + mm_config(&size, &chunk_size); KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size), "buddy_init failed"); - KUNIT_ASSERT_FALSE_MSG(test, igt_check_mm(test, &mm), + KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm), "pre-mm check failed, abort!"); rem = mm.size; @@ -671,7 +671,7 @@ static void igt_buddy_alloc_range(struct kunit *test) "alloc_range start offset mismatch, found=%llx, expected=%llx\n", drm_buddy_block_offset(block), offset); - KUNIT_ASSERT_FALSE(test, igt_check_blocks(test, &mm, &tmp, size, true)); + KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true)); list_splice_tail(&tmp, &blocks); @@ -686,12 +686,12 @@ static void igt_buddy_alloc_range(struct kunit *test) drm_buddy_free_list(&mm, &blocks); - KUNIT_EXPECT_FALSE_MSG(test, igt_check_mm(test, &mm), "post-mm check failed\n"); + KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n"); drm_buddy_fini(&mm); } -static void igt_buddy_alloc_limit(struct kunit *test) +static void drm_test_buddy_alloc_limit(struct kunit *test) { u64 size = U64_MAX, start = 0; struct drm_buddy_block *block; @@ -735,12 +735,12 @@ static int 
drm_buddy_init_test(struct kunit *test) } static struct kunit_case drm_buddy_tests[] = { - KUNIT_CASE(igt_buddy_alloc_limit), - KUNIT_CASE(igt_buddy_alloc_range), - KUNIT_CASE(igt_buddy_alloc_optimistic), - KUNIT_CASE(igt_buddy_alloc_pessimistic), - KUNIT_CASE(igt_buddy_alloc_smoke), - KUNIT_CASE(igt_buddy_alloc_pathological), + KUNIT_CASE(drm_test_buddy_alloc_limit), + KUNIT_CASE(drm_test_buddy_alloc_range), + KUNIT_CASE(drm_test_buddy_alloc_optimistic), + KUNIT_CASE(drm_test_buddy_alloc_pessimistic), + KUNIT_CASE(drm_test_buddy_alloc_smoke), + KUNIT_CASE(drm_test_buddy_alloc_pathological), {} }; diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c index 09b806e27506..34790e7a3760 100644 --- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c +++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c @@ -11,7 +11,7 @@ static const struct drm_connector no_connector = {}; -static void drm_cmdline_test_force_e_only(struct kunit *test) +static void drm_test_cmdline_force_e_only(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "e"; @@ -29,7 +29,7 @@ static void drm_cmdline_test_force_e_only(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_force_D_only_not_digital(struct kunit *test) +static void drm_test_cmdline_force_D_only_not_digital(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "D"; @@ -51,7 +51,7 @@ static const struct drm_connector connector_hdmi = { .connector_type = DRM_MODE_CONNECTOR_HDMIB, }; -static void drm_cmdline_test_force_D_only_hdmi(struct kunit *test) +static void drm_test_cmdline_force_D_only_hdmi(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "D"; @@ -73,7 +73,7 @@ static const struct drm_connector connector_dvi = { .connector_type = DRM_MODE_CONNECTOR_DVII, }; -static void drm_cmdline_test_force_D_only_dvi(struct kunit *test) +static void drm_test_cmdline_force_D_only_dvi(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "D"; @@ -91,7 +91,7 @@ static void drm_cmdline_test_force_D_only_dvi(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL); } -static void drm_cmdline_test_force_d_only(struct kunit *test) +static void drm_test_cmdline_force_d_only(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "d"; @@ -109,7 +109,7 @@ static void drm_cmdline_test_force_d_only(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF); } -static void drm_cmdline_test_res(struct kunit *test) +static void drm_test_cmdline_res(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480"; @@ -131,7 +131,7 @@ static void drm_cmdline_test_res(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_vesa(struct kunit *test) +static void drm_test_cmdline_res_vesa(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480M"; @@ -153,7 +153,7 @@ static void drm_cmdline_test_res_vesa(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_vesa_rblank(struct kunit *test) +static void drm_test_cmdline_res_vesa_rblank(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480MR"; @@ -175,7 +175,7 @@ static void drm_cmdline_test_res_vesa_rblank(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, 
DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_rblank(struct kunit *test) +static void drm_test_cmdline_res_rblank(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480R"; @@ -197,7 +197,7 @@ static void drm_cmdline_test_res_rblank(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_bpp(struct kunit *test) +static void drm_test_cmdline_res_bpp(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24"; @@ -220,7 +220,7 @@ static void drm_cmdline_test_res_bpp(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_refresh(struct kunit *test) +static void drm_test_cmdline_res_refresh(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480@60"; @@ -243,7 +243,7 @@ static void drm_cmdline_test_res_refresh(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_bpp_refresh(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60"; @@ -267,7 +267,7 @@ static void drm_cmdline_test_res_bpp_refresh(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_bpp_refresh_interlaced(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_interlaced(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60i"; @@ -291,7 +291,7 @@ static void drm_cmdline_test_res_bpp_refresh_interlaced(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_bpp_refresh_margins(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_margins(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60m"; @@ -315,7 +315,7 @@ static void drm_cmdline_test_res_bpp_refresh_margins(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_res_bpp_refresh_force_off(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_force_off(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60d"; @@ -339,7 +339,7 @@ static void drm_cmdline_test_res_bpp_refresh_force_off(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF); } -static void drm_cmdline_test_res_bpp_refresh_force_on(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_force_on(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60e"; @@ -363,7 +363,7 @@ static void drm_cmdline_test_res_bpp_refresh_force_on(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_res_bpp_refresh_force_on_analog(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_force_on_analog(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60D"; @@ -387,7 +387,7 @@ static void drm_cmdline_test_res_bpp_refresh_force_on_analog(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_res_bpp_refresh_force_on_digital(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_force_on_digital(struct kunit *test) { struct drm_cmdline_mode mode = { }; static const struct drm_connector connector = { @@ -414,7 
+414,7 @@ static void drm_cmdline_test_res_bpp_refresh_force_on_digital(struct kunit *test KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL); } -static void drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test) +static void drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24@60ime"; @@ -438,7 +438,7 @@ static void drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on(struct KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_res_margins_force_on(struct kunit *test) +static void drm_test_cmdline_res_margins_force_on(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480me"; @@ -460,7 +460,7 @@ static void drm_cmdline_test_res_margins_force_on(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_res_vesa_margins(struct kunit *test) +static void drm_test_cmdline_res_vesa_margins(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480Mm"; @@ -482,7 +482,7 @@ static void drm_cmdline_test_res_vesa_margins(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_name(struct kunit *test) +static void drm_test_cmdline_name(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "NTSC"; @@ -494,7 +494,7 @@ static void drm_cmdline_test_name(struct kunit *test) KUNIT_EXPECT_FALSE(test, mode.bpp_specified); } -static void drm_cmdline_test_name_bpp(struct kunit *test) +static void drm_test_cmdline_name_bpp(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "NTSC-24"; @@ -509,7 +509,7 @@ static void drm_cmdline_test_name_bpp(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.bpp, 24); } -static void drm_cmdline_test_name_option(struct kunit *test) +static void drm_test_cmdline_name_option(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "NTSC,rotate=180"; @@ -521,7 +521,7 @@ static void drm_cmdline_test_name_option(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180); } -static void drm_cmdline_test_name_bpp_option(struct kunit *test) +static void drm_test_cmdline_name_bpp_option(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "NTSC-24,rotate=180"; @@ -535,7 +535,7 @@ static void drm_cmdline_test_name_bpp_option(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.bpp, 24); } -static void drm_cmdline_test_rotate_0(struct kunit *test) +static void drm_test_cmdline_rotate_0(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,rotate=0"; @@ -558,7 +558,7 @@ static void drm_cmdline_test_rotate_0(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_rotate_90(struct kunit *test) +static void drm_test_cmdline_rotate_90(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,rotate=90"; @@ -581,7 +581,7 @@ static void drm_cmdline_test_rotate_90(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_rotate_180(struct kunit *test) +static void drm_test_cmdline_rotate_180(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,rotate=180"; @@ -604,7 +604,7 @@ static void drm_cmdline_test_rotate_180(struct kunit *test) 
KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_rotate_270(struct kunit *test) +static void drm_test_cmdline_rotate_270(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,rotate=270"; @@ -627,7 +627,7 @@ static void drm_cmdline_test_rotate_270(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_hmirror(struct kunit *test) +static void drm_test_cmdline_hmirror(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,reflect_x"; @@ -650,7 +650,7 @@ static void drm_cmdline_test_hmirror(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_vmirror(struct kunit *test) +static void drm_test_cmdline_vmirror(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,reflect_y"; @@ -673,7 +673,7 @@ static void drm_cmdline_test_vmirror(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_margin_options(struct kunit *test) +static void drm_test_cmdline_margin_options(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = @@ -700,7 +700,7 @@ static void drm_cmdline_test_margin_options(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_multiple_options(struct kunit *test) +static void drm_test_cmdline_multiple_options(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480,rotate=270,reflect_x"; @@ -723,7 +723,7 @@ static void drm_cmdline_test_multiple_options(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_bpp_extra_and_option(struct kunit *test) +static void drm_test_cmdline_bpp_extra_and_option(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480-24e,rotate=180"; @@ -747,7 +747,7 @@ static void drm_cmdline_test_bpp_extra_and_option(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_extra_and_option(struct kunit *test) +static void drm_test_cmdline_extra_and_option(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "720x480e,rotate=180"; @@ -769,7 +769,7 @@ static void drm_cmdline_test_extra_and_option(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_freestanding_options(struct kunit *test) +static void drm_test_cmdline_freestanding_options(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "margin_right=14,margin_left=24,margin_bottom=36,margin_top=42"; @@ -792,7 +792,7 @@ static void drm_cmdline_test_freestanding_options(struct kunit *test) KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED); } -static void drm_cmdline_test_freestanding_force_e_and_options(struct kunit *test) +static void drm_test_cmdline_freestanding_force_e_and_options(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char *cmdline = "e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42"; @@ -815,7 +815,7 @@ static void drm_cmdline_test_freestanding_force_e_and_options(struct kunit *test KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON); } -static void drm_cmdline_test_panel_orientation(struct kunit *test) +static void drm_test_cmdline_panel_orientation(struct kunit *test) { struct drm_cmdline_mode mode = { }; const char 
*cmdline = "panel_orientation=upside_down"; @@ -840,7 +840,7 @@ struct drm_cmdline_invalid_test { const char *cmdline; }; -static void drm_cmdline_test_invalid(struct kunit *test) +static void drm_test_cmdline_invalid(struct kunit *test) { const struct drm_cmdline_invalid_test *params = test->param_value; struct drm_cmdline_mode mode = { }; @@ -938,45 +938,45 @@ static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t, KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc); static struct kunit_case drm_cmdline_parser_tests[] = { - KUNIT_CASE(drm_cmdline_test_force_d_only), - KUNIT_CASE(drm_cmdline_test_force_D_only_dvi), - KUNIT_CASE(drm_cmdline_test_force_D_only_hdmi), - KUNIT_CASE(drm_cmdline_test_force_D_only_not_digital), - KUNIT_CASE(drm_cmdline_test_force_e_only), - KUNIT_CASE(drm_cmdline_test_res), - KUNIT_CASE(drm_cmdline_test_res_vesa), - KUNIT_CASE(drm_cmdline_test_res_vesa_rblank), - KUNIT_CASE(drm_cmdline_test_res_rblank), - KUNIT_CASE(drm_cmdline_test_res_bpp), - KUNIT_CASE(drm_cmdline_test_res_refresh), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_interlaced), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_margins), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_force_off), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_force_on), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_force_on_analog), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_force_on_digital), - KUNIT_CASE(drm_cmdline_test_res_bpp_refresh_interlaced_margins_force_on), - KUNIT_CASE(drm_cmdline_test_res_margins_force_on), - KUNIT_CASE(drm_cmdline_test_res_vesa_margins), - KUNIT_CASE(drm_cmdline_test_name), - KUNIT_CASE(drm_cmdline_test_name_bpp), - KUNIT_CASE(drm_cmdline_test_name_option), - KUNIT_CASE(drm_cmdline_test_name_bpp_option), - KUNIT_CASE(drm_cmdline_test_rotate_0), - KUNIT_CASE(drm_cmdline_test_rotate_90), - KUNIT_CASE(drm_cmdline_test_rotate_180), - KUNIT_CASE(drm_cmdline_test_rotate_270), - KUNIT_CASE(drm_cmdline_test_hmirror), - KUNIT_CASE(drm_cmdline_test_vmirror), - KUNIT_CASE(drm_cmdline_test_margin_options), - KUNIT_CASE(drm_cmdline_test_multiple_options), - KUNIT_CASE(drm_cmdline_test_bpp_extra_and_option), - KUNIT_CASE(drm_cmdline_test_extra_and_option), - KUNIT_CASE(drm_cmdline_test_freestanding_options), - KUNIT_CASE(drm_cmdline_test_freestanding_force_e_and_options), - KUNIT_CASE(drm_cmdline_test_panel_orientation), - KUNIT_CASE_PARAM(drm_cmdline_test_invalid, drm_cmdline_invalid_gen_params), + KUNIT_CASE(drm_test_cmdline_force_d_only), + KUNIT_CASE(drm_test_cmdline_force_D_only_dvi), + KUNIT_CASE(drm_test_cmdline_force_D_only_hdmi), + KUNIT_CASE(drm_test_cmdline_force_D_only_not_digital), + KUNIT_CASE(drm_test_cmdline_force_e_only), + KUNIT_CASE(drm_test_cmdline_res), + KUNIT_CASE(drm_test_cmdline_res_vesa), + KUNIT_CASE(drm_test_cmdline_res_vesa_rblank), + KUNIT_CASE(drm_test_cmdline_res_rblank), + KUNIT_CASE(drm_test_cmdline_res_bpp), + KUNIT_CASE(drm_test_cmdline_res_refresh), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_margins), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_off), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_analog), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_digital), + KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on), + 
KUNIT_CASE(drm_test_cmdline_res_margins_force_on), + KUNIT_CASE(drm_test_cmdline_res_vesa_margins), + KUNIT_CASE(drm_test_cmdline_name), + KUNIT_CASE(drm_test_cmdline_name_bpp), + KUNIT_CASE(drm_test_cmdline_name_option), + KUNIT_CASE(drm_test_cmdline_name_bpp_option), + KUNIT_CASE(drm_test_cmdline_rotate_0), + KUNIT_CASE(drm_test_cmdline_rotate_90), + KUNIT_CASE(drm_test_cmdline_rotate_180), + KUNIT_CASE(drm_test_cmdline_rotate_270), + KUNIT_CASE(drm_test_cmdline_hmirror), + KUNIT_CASE(drm_test_cmdline_vmirror), + KUNIT_CASE(drm_test_cmdline_margin_options), + KUNIT_CASE(drm_test_cmdline_multiple_options), + KUNIT_CASE(drm_test_cmdline_bpp_extra_and_option), + KUNIT_CASE(drm_test_cmdline_extra_and_option), + KUNIT_CASE(drm_test_cmdline_freestanding_options), + KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options), + KUNIT_CASE(drm_test_cmdline_panel_orientation), + KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params), {} }; diff --git a/drivers/gpu/drm/tests/drm_damage_helper_test.c b/drivers/gpu/drm/tests/drm_damage_helper_test.c index bf250bd08d7e..115034fc3421 100644 --- a/drivers/gpu/drm/tests/drm_damage_helper_test.c +++ b/drivers/gpu/drm/tests/drm_damage_helper_test.c @@ -59,6 +59,11 @@ static int drm_damage_helper_init(struct kunit *test) static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2, int y2) { + state->src_x = x1; + state->src_y = y1; + state->src_w = x2 - x1; + state->src_h = y2 - y1; + state->src.x1 = x1; state->src.y1 = y1; state->src.x2 = x2; @@ -111,7 +116,7 @@ static void check_damage_clip(struct kunit *test, struct drm_rect *r, r->x1, r->y1, r->x2, r->y2, x1, y1, x2, y2); } -static void igt_damage_iter_no_damage(struct kunit *test) +static void drm_test_damage_iter_no_damage(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -129,7 +134,7 @@ static void igt_damage_iter_no_damage(struct kunit *test) check_damage_clip(test, &clip, 0, 0, 2048, 2048); } -static void igt_damage_iter_no_damage_fractional_src(struct kunit *test) +static void drm_test_damage_iter_no_damage_fractional_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -150,7 +155,7 @@ static void igt_damage_iter_no_damage_fractional_src(struct kunit *test) check_damage_clip(test, &clip, 3, 3, 1028, 772); } -static void igt_damage_iter_no_damage_src_moved(struct kunit *test) +static void drm_test_damage_iter_no_damage_src_moved(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -169,7 +174,7 @@ static void igt_damage_iter_no_damage_src_moved(struct kunit *test) check_damage_clip(test, &clip, 10, 10, 1034, 778); } -static void igt_damage_iter_no_damage_fractional_src_moved(struct kunit *test) +static void drm_test_damage_iter_no_damage_fractional_src_moved(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -189,7 +194,7 @@ static void igt_damage_iter_no_damage_fractional_src_moved(struct kunit *test) check_damage_clip(test, &clip, 4, 4, 1029, 773); } -static void igt_damage_iter_no_damage_not_visible(struct kunit *test) +static void drm_test_damage_iter_no_damage_not_visible(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -207,7 +212,7 @@ static void igt_damage_iter_no_damage_not_visible(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should 
have no damage."); } -static void igt_damage_iter_no_damage_no_crtc(struct kunit *test) +static void drm_test_damage_iter_no_damage_no_crtc(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -225,7 +230,7 @@ static void igt_damage_iter_no_damage_no_crtc(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage."); } -static void igt_damage_iter_no_damage_no_fb(struct kunit *test) +static void drm_test_damage_iter_no_damage_no_fb(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -243,7 +248,7 @@ static void igt_damage_iter_no_damage_no_fb(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage."); } -static void igt_damage_iter_simple_damage(struct kunit *test) +static void drm_test_damage_iter_simple_damage(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -266,7 +271,7 @@ static void igt_damage_iter_simple_damage(struct kunit *test) check_damage_clip(test, &clip, 0, 0, 1024, 768); } -static void igt_damage_iter_single_damage(struct kunit *test) +static void drm_test_damage_iter_single_damage(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -288,7 +293,7 @@ static void igt_damage_iter_single_damage(struct kunit *test) check_damage_clip(test, &clip, 256, 192, 768, 576); } -static void igt_damage_iter_single_damage_intersect_src(struct kunit *test) +static void drm_test_damage_iter_single_damage_intersect_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -311,7 +316,7 @@ static void igt_damage_iter_single_damage_intersect_src(struct kunit *test) check_damage_clip(test, &clip, 256, 192, 1024, 768); } -static void igt_damage_iter_single_damage_outside_src(struct kunit *test) +static void drm_test_damage_iter_single_damage_outside_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -333,7 +338,7 @@ static void igt_damage_iter_single_damage_outside_src(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage."); } -static void igt_damage_iter_single_damage_fractional_src(struct kunit *test) +static void drm_test_damage_iter_single_damage_fractional_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -358,7 +363,7 @@ static void igt_damage_iter_single_damage_fractional_src(struct kunit *test) check_damage_clip(test, &clip, 10, 10, 256, 330); } -static void igt_damage_iter_single_damage_intersect_fractional_src(struct kunit *test) +static void drm_test_damage_iter_single_damage_intersect_fractional_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -385,7 +390,7 @@ static void igt_damage_iter_single_damage_intersect_fractional_src(struct kunit check_damage_clip(test, &clip, 10, 4, 1029, 330); } -static void igt_damage_iter_single_damage_outside_fractional_src(struct kunit *test) +static void drm_test_damage_iter_single_damage_outside_fractional_src(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -410,7 +415,7 @@ static void igt_damage_iter_single_damage_outside_fractional_src(struct kunit *t KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage."); } -static void 
igt_damage_iter_single_damage_src_moved(struct kunit *test) +static void drm_test_damage_iter_single_damage_src_moved(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -435,7 +440,7 @@ static void igt_damage_iter_single_damage_src_moved(struct kunit *test) check_damage_clip(test, &clip, 10, 10, 1034, 778); } -static void igt_damage_iter_single_damage_fractional_src_moved(struct kunit *test) +static void drm_test_damage_iter_single_damage_fractional_src_moved(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -462,7 +467,7 @@ static void igt_damage_iter_single_damage_fractional_src_moved(struct kunit *tes check_damage_clip(test, &clip, 4, 4, 1029, 773); } -static void igt_damage_iter_damage(struct kunit *test) +static void drm_test_damage_iter_damage(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -490,7 +495,7 @@ static void igt_damage_iter_damage(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set."); } -static void igt_damage_iter_damage_one_intersect(struct kunit *test) +static void drm_test_damage_iter_damage_one_intersect(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -520,7 +525,7 @@ static void igt_damage_iter_damage_one_intersect(struct kunit *test) KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set."); } -static void igt_damage_iter_damage_one_outside(struct kunit *test) +static void drm_test_damage_iter_damage_one_outside(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -544,7 +549,7 @@ static void igt_damage_iter_damage_one_outside(struct kunit *test) check_damage_clip(test, &clip, 240, 200, 280, 250); } -static void igt_damage_iter_damage_src_moved(struct kunit *test) +static void drm_test_damage_iter_damage_src_moved(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -571,7 +576,7 @@ static void igt_damage_iter_damage_src_moved(struct kunit *test) check_damage_clip(test, &clip, 3, 3, 1028, 772); } -static void igt_damage_iter_damage_not_visible(struct kunit *test) +static void drm_test_damage_iter_damage_not_visible(struct kunit *test) { struct drm_damage_mock *mock = test->priv; struct drm_atomic_helper_damage_iter iter; @@ -599,27 +604,27 @@ static void igt_damage_iter_damage_not_visible(struct kunit *test) } static struct kunit_case drm_damage_helper_tests[] = { - KUNIT_CASE(igt_damage_iter_no_damage), - KUNIT_CASE(igt_damage_iter_no_damage_fractional_src), - KUNIT_CASE(igt_damage_iter_no_damage_src_moved), - KUNIT_CASE(igt_damage_iter_no_damage_fractional_src_moved), - KUNIT_CASE(igt_damage_iter_no_damage_not_visible), - KUNIT_CASE(igt_damage_iter_no_damage_no_crtc), - KUNIT_CASE(igt_damage_iter_no_damage_no_fb), - KUNIT_CASE(igt_damage_iter_simple_damage), - KUNIT_CASE(igt_damage_iter_single_damage), - KUNIT_CASE(igt_damage_iter_single_damage_intersect_src), - KUNIT_CASE(igt_damage_iter_single_damage_outside_src), - KUNIT_CASE(igt_damage_iter_single_damage_fractional_src), - KUNIT_CASE(igt_damage_iter_single_damage_intersect_fractional_src), - KUNIT_CASE(igt_damage_iter_single_damage_outside_fractional_src), - KUNIT_CASE(igt_damage_iter_single_damage_src_moved), - KUNIT_CASE(igt_damage_iter_single_damage_fractional_src_moved), - 
KUNIT_CASE(igt_damage_iter_damage), - KUNIT_CASE(igt_damage_iter_damage_one_intersect), - KUNIT_CASE(igt_damage_iter_damage_one_outside), - KUNIT_CASE(igt_damage_iter_damage_src_moved), - KUNIT_CASE(igt_damage_iter_damage_not_visible), + KUNIT_CASE(drm_test_damage_iter_no_damage), + KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src), + KUNIT_CASE(drm_test_damage_iter_no_damage_src_moved), + KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src_moved), + KUNIT_CASE(drm_test_damage_iter_no_damage_not_visible), + KUNIT_CASE(drm_test_damage_iter_no_damage_no_crtc), + KUNIT_CASE(drm_test_damage_iter_no_damage_no_fb), + KUNIT_CASE(drm_test_damage_iter_simple_damage), + KUNIT_CASE(drm_test_damage_iter_single_damage), + KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_src), + KUNIT_CASE(drm_test_damage_iter_single_damage_outside_src), + KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src), + KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_fractional_src), + KUNIT_CASE(drm_test_damage_iter_single_damage_outside_fractional_src), + KUNIT_CASE(drm_test_damage_iter_single_damage_src_moved), + KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src_moved), + KUNIT_CASE(drm_test_damage_iter_damage), + KUNIT_CASE(drm_test_damage_iter_damage_one_intersect), + KUNIT_CASE(drm_test_damage_iter_damage_one_outside), + KUNIT_CASE(drm_test_damage_iter_damage_src_moved), + KUNIT_CASE(drm_test_damage_iter_damage_not_visible), { } }; diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c index 1d2fade56227..65c9d225b558 100644 --- a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c +++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c @@ -16,7 +16,7 @@ #include "../display/drm_dp_mst_topology_internal.h" -static void igt_dp_mst_calc_pbn_mode(struct kunit *test) +static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test) { int pbn, i; const struct { @@ -177,7 +177,7 @@ out: return result; } -static void igt_dp_mst_sideband_msg_req_decode(struct kunit *test) +static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test) { struct drm_dp_sideband_msg_req_body in = { 0 }; u8 data[] = { 0xff, 0x0, 0xdd }; @@ -271,8 +271,8 @@ static void igt_dp_mst_sideband_msg_req_decode(struct kunit *test) } static struct kunit_case drm_dp_mst_helper_tests[] = { - KUNIT_CASE(igt_dp_mst_calc_pbn_mode), - KUNIT_CASE(igt_dp_mst_sideband_msg_req_decode), + KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode), + KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode), { } }; diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 828487071796..8d86c250c2ec 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -16,6 +16,11 @@ #define TEST_BUF_SIZE 50 +struct convert_to_gray8_result { + unsigned int dst_pitch; + const u8 expected[TEST_BUF_SIZE]; +}; + struct convert_to_rgb332_result { unsigned int dst_pitch; const u8 expected[TEST_BUF_SIZE]; @@ -27,13 +32,26 @@ struct convert_to_rgb565_result { const u16 expected_swab[TEST_BUF_SIZE]; }; +struct convert_to_rgb888_result { + unsigned int dst_pitch; + const u8 expected[TEST_BUF_SIZE]; +}; + +struct convert_to_xrgb2101010_result { + unsigned int dst_pitch; + const u32 expected[TEST_BUF_SIZE]; +}; + struct convert_xrgb8888_case { const char *name; unsigned int pitch; struct drm_rect clip; const u32 xrgb8888[TEST_BUF_SIZE]; + struct convert_to_gray8_result gray8_result; struct 
convert_to_rgb332_result rgb332_result; struct convert_to_rgb565_result rgb565_result; + struct convert_to_rgb888_result rgb888_result; + struct convert_to_xrgb2101010_result xrgb2101010_result; }; static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { @@ -42,6 +60,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .pitch = 1 * 4, .clip = DRM_RECT_INIT(0, 0, 1, 1), .xrgb8888 = { 0x01FF0000 }, + .gray8_result = { + .dst_pitch = 0, + .expected = { 0x4C }, + }, .rgb332_result = { .dst_pitch = 0, .expected = { 0xE0 }, @@ -51,6 +73,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .expected = { 0xF800 }, .expected_swab = { 0x00F8 }, }, + .rgb888_result = { + .dst_pitch = 0, + .expected = { 0x00, 0x00, 0xFF }, + }, + .xrgb2101010_result = { + .dst_pitch = 0, + .expected = { 0x3FF00000 }, + }, }, { .name = "single_pixel_clip_rectangle", @@ -60,6 +90,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00000000, 0x00000000, 0x00000000, 0x10FF0000, }, + .gray8_result = { + .dst_pitch = 0, + .expected = { 0x4C }, + }, .rgb332_result = { .dst_pitch = 0, .expected = { 0xE0 }, @@ -69,6 +103,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { .expected = { 0xF800 }, .expected_swab = { 0x00F8 }, }, + .rgb888_result = { + .dst_pitch = 0, + .expected = { 0x00, 0x00, 0xFF }, + }, + .xrgb2101010_result = { + .dst_pitch = 0, + .expected = { 0x3FF00000 }, + }, }, { /* Well known colors: White, black, red, green, blue, magenta, @@ -85,6 +127,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000, 0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000, }, + .gray8_result = { + .dst_pitch = 0, + .expected = { + 0xFF, 0x00, + 0x4C, 0x99, + 0x19, 0x66, + 0xE5, 0xB2, + }, + }, .rgb332_result = { .dst_pitch = 0, .expected = { @@ -109,6 +160,24 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0xE0FF, 0xFF07, }, }, + .rgb888_result = { + .dst_pitch = 0, + .expected = { + 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00, + 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF, + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + }, + }, + .xrgb2101010_result = { + .dst_pitch = 0, + .expected = { + 0x3FFFFFFF, 0x00000000, + 0x3FF00000, 0x000FFC00, + 0x000003FF, 0x3FF003FF, + 0x3FFFFC00, 0x000FFFFF, + }, + }, }, { /* Randomly picked colors. Full buffer within the clip area. 
*/ @@ -120,6 +189,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0xD16C7073, 0xA20E449C, 0xB2114D05, 0xC2A80303, 0xD26C7073, 0xA30E449C, }, + .gray8_result = { + .dst_pitch = 5, + .expected = { + 0x3C, 0x33, 0x34, 0x00, 0x00, + 0x6F, 0x3C, 0x33, 0x00, 0x00, + 0x34, 0x6F, 0x3C, 0x00, 0x00, + }, + }, .rgb332_result = { .dst_pitch = 5, .expected = { @@ -141,6 +218,25 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000, }, }, + .rgb888_result = { + .dst_pitch = 15, + .expected = { + 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + .xrgb2101010_result = { + .dst_pitch = 20, + .expected = { + 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000, + 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000, + 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000, + }, + }, }, }; @@ -192,7 +288,37 @@ static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t, KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases, convert_xrgb8888_case_desc); -static void xrgb8888_to_rgb332_test(struct kunit *test) +static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_gray8_result *result = ¶ms->gray8_result; + size_t dst_size; + __u8 *buf = NULL; + __u32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch, + ¶ms->clip); + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); + KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); +} + +static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test) { const struct convert_xrgb8888_case *params = test->param_value; const struct convert_to_rgb332_result *result = ¶ms->rgb332_result; @@ -222,7 +348,7 @@ static void xrgb8888_to_rgb332_test(struct kunit *test) KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); } -static void xrgb8888_to_rgb565_test(struct kunit *test) +static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) { const struct convert_xrgb8888_case *params = test->param_value; const struct convert_to_rgb565_result *result = ¶ms->rgb565_result; @@ -255,9 +381,73 @@ static void xrgb8888_to_rgb565_test(struct kunit *test) KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected_swab, dst_size), 0); } +static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_rgb888_result *result = ¶ms->rgb888_result; + size_t dst_size; + __u8 *buf = NULL; + __u32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = 
conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch, + ¶ms->clip); + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); + KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); +} + +static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) +{ + const struct convert_xrgb8888_case *params = test->param_value; + const struct convert_to_xrgb2101010_result *result = ¶ms->xrgb2101010_result; + size_t dst_size; + __u32 *buf = NULL; + __u32 *xrgb8888 = NULL; + struct iosys_map dst, src; + + struct drm_framebuffer fb = { + .format = drm_format_info(DRM_FORMAT_XRGB8888), + .pitches = { params->pitch, 0, 0 }, + }; + + dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010, + result->dst_pitch, ¶ms->clip); + KUNIT_ASSERT_GT(test, dst_size, 0); + + buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); + iosys_map_set_vaddr(&dst, buf); + + xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); + iosys_map_set_vaddr(&src, xrgb8888); + + drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip); + buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE); + KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); +} + static struct kunit_case drm_format_helper_test_cases[] = { - KUNIT_CASE_PARAM(xrgb8888_to_rgb332_test, convert_xrgb8888_gen_params), - KUNIT_CASE_PARAM(xrgb8888_to_rgb565_test, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params), {} }; diff --git a/drivers/gpu/drm/tests/drm_format_test.c b/drivers/gpu/drm/tests/drm_format_test.c index afb4bca72187..ec6996ce819a 100644 --- a/drivers/gpu/drm/tests/drm_format_test.c +++ b/drivers/gpu/drm/tests/drm_format_test.c @@ -9,103 +9,136 @@ #include <drm/drm_fourcc.h> -static void igt_check_drm_format_block_width(struct kunit *test) +static void drm_test_format_block_width_invalid(struct kunit *test) { const struct drm_format_info *info = NULL; - /* Test invalid arguments */ - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, -1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 1)); - - /* Test 1 plane format */ - info = drm_format_info(DRM_FORMAT_XRGB4444); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, -1)); - - /* Test 2 planes format */ - info = drm_format_info(DRM_FORMAT_NV12); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 0)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 2)); - 
KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, -1)); - - /* Test 3 planes format */ - info = drm_format_info(DRM_FORMAT_YUV422); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 0)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 1)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_width(info, 2)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 3)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, -1)); - - /* Test a tiled format */ - info = drm_format_info(DRM_FORMAT_X0L0); - KUNIT_EXPECT_TRUE(test, info); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0); +} + +static void drm_test_format_block_width_one_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0); +} + +static void drm_test_format_block_width_two_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0); +} + +static void drm_test_format_block_width_three_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 3), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0); +} + +static void drm_test_format_block_width_tiled(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0); + + KUNIT_ASSERT_NOT_NULL(test, info); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 2); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, 1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_width(info, -1)); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0); } -static void igt_check_drm_format_block_height(struct kunit *test) +static void drm_test_format_block_height_invalid(struct kunit *test) { const struct drm_format_info *info = NULL; - /* Test invalid arguments */ - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, -1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 1)); - - /* Test 1 plane format */ - info = drm_format_info(DRM_FORMAT_XRGB4444); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, -1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 1)); - - /* Test 2 planes format */ - info = drm_format_info(DRM_FORMAT_NV12); - KUNIT_EXPECT_TRUE(test, info); - 
KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 0)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 2)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, -1)); - - /* Test 3 planes format */ - info = drm_format_info(DRM_FORMAT_YUV422); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 0)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 1)); - KUNIT_EXPECT_TRUE(test, drm_format_info_block_height(info, 2)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 3)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, -1)); - - /* Test a tiled format */ - info = drm_format_info(DRM_FORMAT_X0L0); - KUNIT_EXPECT_TRUE(test, info); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0); +} + +static void drm_test_format_block_height_one_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0); +} + +static void drm_test_format_block_height_two_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0); +} + +static void drm_test_format_block_height_three_plane(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 1); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 3), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0); +} + +static void drm_test_format_block_height_tiled(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0); + + KUNIT_ASSERT_NOT_NULL(test, info); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 2); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, 1)); - KUNIT_EXPECT_FALSE(test, drm_format_info_block_height(info, -1)); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0); } -static void igt_check_drm_format_min_pitch_for_single_plane(struct kunit *test) +static void drm_test_format_min_pitch_invalid(struct kunit *test) { const struct drm_format_info *info = NULL; - /* Test invalid arguments */ - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, 
drm_format_info_min_pitch(info, 1, 0), 0); +} + +static void drm_test_format_min_pitch_one_plane_8bpp(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB332); - /* Test 1 plane 8 bits per pixel format */ - info = drm_format_info(DRM_FORMAT_RGB332); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2); @@ -118,13 +151,17 @@ static void igt_check_drm_format_min_pitch_for_single_plane(struct kunit *test) (uint64_t)UINT_MAX); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)), (uint64_t)(UINT_MAX - 1)); +} + +static void drm_test_format_min_pitch_one_plane_16bpp(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444); - /* Test 1 plane 16 bits per pixel format */ - info = drm_format_info(DRM_FORMAT_XRGB4444); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4); @@ -137,13 +174,17 @@ static void igt_check_drm_format_min_pitch_for_single_plane(struct kunit *test) (uint64_t)UINT_MAX * 2); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)), (uint64_t)(UINT_MAX - 1) * 2); +} + +static void drm_test_format_min_pitch_one_plane_24bpp(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB888); - /* Test 1 plane 24 bits per pixel format */ - info = drm_format_info(DRM_FORMAT_RGB888); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 3); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 6); @@ -154,15 +195,19 @@ static void igt_check_drm_format_min_pitch_for_single_plane(struct kunit *test) KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2013); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX), (uint64_t)UINT_MAX * 3); - KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)), + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1), (uint64_t)(UINT_MAX - 1) * 3); +} - /* Test 1 plane 32 bits per pixel 
format */ - info = drm_format_info(DRM_FORMAT_ABGR8888); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); +static void drm_test_format_min_pitch_one_plane_32bpp(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ABGR8888); + + KUNIT_ASSERT_NOT_NULL(test, info); + + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 4); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 8); @@ -173,21 +218,20 @@ static void igt_check_drm_format_min_pitch_for_single_plane(struct kunit *test) KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2684); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX), (uint64_t)UINT_MAX * 4); - KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)), + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1), (uint64_t)(UINT_MAX - 1) * 4); } -static void igt_check_drm_format_min_pitch_for_multi_planar(struct kunit *test) +static void drm_test_format_min_pitch_two_plane(struct kunit *test) { - const struct drm_format_info *info = NULL; + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12); + + KUNIT_ASSERT_NOT_NULL(test, info); - /* Test 2 planes format */ - info = drm_format_info(DRM_FORMAT_NV12); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 2, 0)); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2); @@ -211,15 +255,19 @@ static void igt_check_drm_format_min_pitch_for_multi_planar(struct kunit *test) (uint64_t)(UINT_MAX - 1)); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2), (uint64_t)(UINT_MAX - 1)); +} + +static void drm_test_format_min_pitch_three_plane_8bpp(struct kunit *test) +{ + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422); + + KUNIT_ASSERT_NOT_NULL(test, info); - /* Test 3 planes 8 bits per pixel format */ - info = drm_format_info(DRM_FORMAT_YUV422); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 2, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 3, 0)); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, 
drm_format_info_min_pitch(info, 3, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 1); @@ -256,16 +304,15 @@ static void igt_check_drm_format_min_pitch_for_multi_planar(struct kunit *test) (uint64_t)(UINT_MAX - 1) / 2); } -static void igt_check_drm_format_min_pitch_for_tiled_format(struct kunit *test) +static void drm_test_format_min_pitch_tiled(struct kunit *test) { - const struct drm_format_info *info = NULL; + const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L2); + + KUNIT_ASSERT_NOT_NULL(test, info); - /* Test tiled format */ - info = drm_format_info(DRM_FORMAT_X0L2); - KUNIT_EXPECT_TRUE(test, info); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 0, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, -1, 0)); - KUNIT_EXPECT_FALSE(test, drm_format_info_min_pitch(info, 1, 0)); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0); + KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2); KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4); @@ -281,12 +328,25 @@ static void igt_check_drm_format_min_pitch_for_tiled_format(struct kunit *test) } static struct kunit_case drm_format_tests[] = { - KUNIT_CASE(igt_check_drm_format_block_width), - KUNIT_CASE(igt_check_drm_format_block_height), - KUNIT_CASE(igt_check_drm_format_min_pitch_for_single_plane), - KUNIT_CASE(igt_check_drm_format_min_pitch_for_multi_planar), - KUNIT_CASE(igt_check_drm_format_min_pitch_for_tiled_format), - { } + KUNIT_CASE(drm_test_format_block_width_invalid), + KUNIT_CASE(drm_test_format_block_width_one_plane), + KUNIT_CASE(drm_test_format_block_width_two_plane), + KUNIT_CASE(drm_test_format_block_width_three_plane), + KUNIT_CASE(drm_test_format_block_width_tiled), + KUNIT_CASE(drm_test_format_block_height_invalid), + KUNIT_CASE(drm_test_format_block_height_one_plane), + KUNIT_CASE(drm_test_format_block_height_two_plane), + KUNIT_CASE(drm_test_format_block_height_three_plane), + KUNIT_CASE(drm_test_format_block_height_tiled), + KUNIT_CASE(drm_test_format_min_pitch_invalid), + KUNIT_CASE(drm_test_format_min_pitch_one_plane_8bpp), + KUNIT_CASE(drm_test_format_min_pitch_one_plane_16bpp), + KUNIT_CASE(drm_test_format_min_pitch_one_plane_24bpp), + KUNIT_CASE(drm_test_format_min_pitch_one_plane_32bpp), + KUNIT_CASE(drm_test_format_min_pitch_two_plane), + KUNIT_CASE(drm_test_format_min_pitch_three_plane_8bpp), + KUNIT_CASE(drm_test_format_min_pitch_tiled), + {} }; static struct kunit_suite drm_format_test_suite = { diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c index ec7a08ba4056..df235b7fdaa5 100644 --- a/drivers/gpu/drm/tests/drm_framebuffer_test.c +++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c @@ -25,7 +25,7 @@ struct drm_framebuffer_test { const char *name; }; -static struct drm_framebuffer_test createbuffer_tests[] = { +static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = { { .buffer_created = 1, .name = "ABGR8888 normal sizes", .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888, .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 }, @@ -330,43 +330,50 @@ static struct drm_mode_config_funcs mock_config_funcs = { .fb_create = fb_create_mock, }; -static struct drm_device mock_drm_device = { - .mode_config = { - .min_width = MIN_WIDTH, 
- .max_width = MAX_WIDTH, - .min_height = MIN_HEIGHT, - .max_height = MAX_HEIGHT, - .funcs = &mock_config_funcs, - }, -}; +static int drm_framebuffer_test_init(struct kunit *test) +{ + struct drm_device *mock; + + mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock); -static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r) + mock->mode_config.min_width = MIN_WIDTH; + mock->mode_config.max_width = MAX_WIDTH; + mock->mode_config.min_height = MIN_HEIGHT; + mock->mode_config.max_height = MAX_HEIGHT; + mock->mode_config.funcs = &mock_config_funcs; + + test->priv = mock; + return 0; +} + +static void drm_test_framebuffer_create(struct kunit *test) { + const struct drm_framebuffer_test *params = test->param_value; + struct drm_device *mock = test->priv; int buffer_created = 0; - mock_drm_device.dev_private = &buffer_created; - drm_internal_framebuffer_create(&mock_drm_device, r, NULL); - return buffer_created; + mock->dev_private = &buffer_created; + drm_internal_framebuffer_create(mock, ¶ms->cmd, NULL); + KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created); } -static void igt_check_drm_framebuffer_create(struct kunit *test) +static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc) { - int i = 0; - - for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) { - KUNIT_EXPECT_EQ_MSG(test, createbuffer_tests[i].buffer_created, - execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd), - "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name); - } + strcpy(desc, t->name); } +KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases, + drm_framebuffer_test_to_desc); + static struct kunit_case drm_framebuffer_tests[] = { - KUNIT_CASE(igt_check_drm_framebuffer_create), + KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params), { } }; static struct kunit_suite drm_framebuffer_test_suite = { .name = "drm_framebuffer", + .init = drm_framebuffer_test_init, .test_cases = drm_framebuffer_tests, }; diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c index 1e2c1aa524bd..659d1af4dca7 100644 --- a/drivers/gpu/drm/tests/drm_mm_test.c +++ b/drivers/gpu/drm/tests/drm_mm_test.c @@ -191,7 +191,7 @@ static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm return ok; } -static void igt_mm_init(struct kunit *test) +static void drm_test_mm_init(struct kunit *test) { const unsigned int size = 4096; struct drm_mm mm; @@ -245,7 +245,7 @@ out: drm_mm_takedown(&mm); } -static void igt_mm_debug(struct kunit *test) +static void drm_test_mm_debug(struct kunit *test) { struct drm_mm mm; struct drm_mm_node nodes[2]; @@ -341,7 +341,7 @@ static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm, return true; } -static int __igt_reserve(struct kunit *test, unsigned int count, u64 size) +static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size) { DRM_RND_STATE(prng, random_seed); struct drm_mm mm; @@ -349,7 +349,7 @@ static int __igt_reserve(struct kunit *test, unsigned int count, u64 size) unsigned int *order, n, m, o = 0; int ret, err; - /* For exercising drm_mm_reserve_node(struct kunit *test, ), we want to check that + /* For exercising drm_mm_reserve_node(), we want to check that * reservations outside of the drm_mm range are rejected, and to * overlapping and otherwise already occupied ranges. Afterwards, * the tree and nodes should be intact. 
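
The reservation semantics this renamed helper exercises reduce to a small pattern: a node inside the managed range must be accepted, a node outside it must be rejected. Below is a minimal KUnit sketch of that check, assuming the stock drm_mm API; the function name drm_test_mm_reserve_sketch and the hard-coded 1 MiB range are illustrative only and are not part of this patch.

#include <kunit/test.h>
#include <drm/drm_mm.h>

/* Sketch: reserving inside the managed range succeeds, reserving
 * outside it fails with -ENOSPC, and teardown leaves nothing behind.
 */
static void drm_test_mm_reserve_sketch(struct kunit *test)
{
	struct drm_mm mm;
	struct drm_mm_node inside = { .start = 0, .size = 4096 };
	struct drm_mm_node outside = { .start = 1ULL << 32, .size = 4096 };

	drm_mm_init(&mm, 0, 1ULL << 20);	/* manage [0, 1 MiB) */

	KUNIT_EXPECT_EQ(test, drm_mm_reserve_node(&mm, &inside), 0);
	KUNIT_EXPECT_EQ(test, drm_mm_reserve_node(&mm, &outside), -ENOSPC);

	drm_mm_remove_node(&inside);
	drm_mm_takedown(&mm);
}
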
@@ -463,7 +463,7 @@ err: return ret; } -static void igt_mm_reserve(struct kunit *test) +static void drm_test_mm_reserve(struct kunit *test) { const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); int n; @@ -471,9 +471,9 @@ static void igt_mm_reserve(struct kunit *test) for_each_prime_number_from(n, 1, 54) { u64 size = BIT_ULL(n); - KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size - 1)); - KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size)); - KUNIT_ASSERT_FALSE(test, __igt_reserve(test, count, size + 1)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1)); cond_resched(); } @@ -524,7 +524,7 @@ static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size) return false; } -static int __igt_insert(struct kunit *test, unsigned int count, u64 size, bool replace) +static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace) { DRM_RND_STATE(prng, random_seed); const struct insert_mode *mode; @@ -660,7 +660,7 @@ err_nodes: return ret; } -static void igt_mm_insert(struct kunit *test) +static void drm_test_mm_insert(struct kunit *test) { const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); unsigned int n; @@ -668,20 +668,20 @@ static void igt_mm_insert(struct kunit *test) for_each_prime_number_from(n, 1, 54) { u64 size = BIT_ULL(n); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size - 1, false)); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size, false)); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size + 1, false)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false)); cond_resched(); } } -static void igt_mm_replace(struct kunit *test) +static void drm_test_mm_replace(struct kunit *test) { const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); unsigned int n; - /* Reuse igt_insert to exercise replacement by inserting a dummy node, + /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node, * then replacing it with the intended node. We want to check that * the tree is intact and all the information we need is carried * across to the target node. 
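[Editor's note] The reserve and replace tests above revolve around three drm_mm entry points: drm_mm_reserve_node() for caller-chosen placement, drm_mm_insert_node() for allocator-chosen placement, and drm_mm_replace_node() to transplant an allocation from a dummy node into the node the caller actually wants. A hedged sketch of how they fit together (hypothetical function, arbitrary range and sizes, error handling trimmed):

#include <drm/drm_mm.h>

static int example_reserve_and_replace(void)
{
        struct drm_mm mm;
        struct drm_mm_node rsvd = {}, dummy = {}, final = {};
        int ret;

        drm_mm_init(&mm, 0, 4096);      /* manage offsets [0, 4096) */

        /* Caller picks the placement; overlaps fail with -ENOSPC. */
        rsvd.start = 512;
        rsvd.size = 256;
        ret = drm_mm_reserve_node(&mm, &rsvd);
        if (ret)
                goto out;

        /* Allocator picks the placement for a 128-unit block. */
        ret = drm_mm_insert_node(&mm, &dummy, 128);
        if (ret)
                goto out_rsvd;

        /* Hand dummy's range over to final; dummy drops out of the tree. */
        drm_mm_replace_node(&dummy, &final);

        drm_mm_remove_node(&final);
out_rsvd:
        drm_mm_remove_node(&rsvd);
out:
        drm_mm_takedown(&mm);   /* all nodes must be removed first */
        return ret;
}

The tests assert exactly these postconditions: out-of-range and overlapping reservations are rejected, and the tree and the surviving nodes stay intact after every insert and replace.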
@@ -690,9 +690,9 @@ static void igt_mm_replace(struct kunit *test) for_each_prime_number_from(n, 1, 54) { u64 size = BIT_ULL(n); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size - 1, true)); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size, true)); - KUNIT_ASSERT_FALSE(test, __igt_insert(test, count, size + 1, true)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true)); cond_resched(); } @@ -808,7 +808,8 @@ static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm, return true; } -static int __igt_insert_range(struct kunit *test, unsigned int count, u64 size, u64 start, u64 end) +static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size, + u64 start, u64 end) { const struct insert_mode *mode; struct drm_mm mm; @@ -820,7 +821,7 @@ static int __igt_insert_range(struct kunit *test, unsigned int count, u64 size, DRM_MM_BUG_ON(!size); DRM_MM_BUG_ON(end <= start); - /* Very similar to __igt_insert(struct kunit *test, ), but now instead of populating the + /* Very similar to __drm_test_mm_insert(), but now instead of populating the * full range of the drm_mm, we try to fill a small portion of it. */ @@ -921,7 +922,7 @@ static int insert_outside_range(struct kunit *test) return 0; } -static void igt_mm_insert_range(struct kunit *test) +static void drm_test_mm_insert_range(struct kunit *test) { const unsigned int count = min_t(unsigned int, BIT(13), max_iterations); unsigned int n; @@ -933,21 +934,21 @@ static void igt_mm_insert_range(struct kunit *test) const u64 size = BIT_ULL(n); const u64 max = count * size; - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max)); - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 1, max)); - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max - 1)); - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, 0, max / 2)); - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, max / 2, max / 2)); - KUNIT_ASSERT_FALSE(test, __igt_insert_range(test, count, size, - max / 4 + 1, 3 * max / 4 - 1)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, + max / 2, max / 2)); + KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, + max / 4 + 1, 3 * max / 4 - 1)); cond_resched(); } } -static int prepare_igt_frag(struct kunit *test, struct drm_mm *mm, - struct drm_mm_node *nodes, unsigned int num_insert, - const struct insert_mode *mode) +static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes, + unsigned int num_insert, const struct insert_mode *mode) { unsigned int size = 4096; unsigned int i; @@ -987,7 +988,7 @@ static u64 get_insert_time(struct kunit *test, struct drm_mm *mm, return ktime_to_ns(ktime_sub(ktime_get(), start)); } -static void igt_mm_frag(struct kunit *test) +static void drm_test_mm_frag(struct kunit *test) { struct drm_mm mm; const struct insert_mode *mode; @@ -997,15 +998,15 @@ static void igt_mm_frag(struct kunit *test) /* We need 4 * 
insert_size nodes to hold intermediate allocated * drm_mm nodes. - * 1 times for prepare_igt_frag(struct kunit *test, ) - * 1 times for get_insert_time(struct kunit *test, ) - * 2 times for get_insert_time(struct kunit *test, ) + * 1 times for prepare_frag() + * 1 times for get_insert_time() + * 2 times for get_insert_time() */ nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes))); KUNIT_ASSERT_TRUE(test, nodes); /* For BOTTOMUP and TOPDOWN, we first fragment the - * address space using prepare_igt_frag(struct kunit *test, ) and then try to verify + * address space using prepare_frag() and then try to verify * that insertions scale quadratically from 10k to 20k insertions */ drm_mm_init(&mm, 1, U64_MAX - 2); @@ -1016,7 +1017,7 @@ static void igt_mm_frag(struct kunit *test) mode->mode != DRM_MM_INSERT_HIGH) continue; - if (prepare_igt_frag(test, &mm, nodes, insert_size, mode)) + if (prepare_frag(test, &mm, nodes, insert_size, mode)) goto err; insert_time1 = get_insert_time(test, &mm, insert_size, @@ -1049,7 +1050,7 @@ err: vfree(nodes); } -static void igt_mm_align(struct kunit *test) +static void drm_test_mm_align(struct kunit *test) { const struct insert_mode *mode; const unsigned int max_count = min(8192u, max_prime); @@ -1096,7 +1097,7 @@ out: vfree(nodes); } -static void igt_align_pot(struct kunit *test, int max) +static void drm_test_mm_align_pot(struct kunit *test, int max) { struct drm_mm mm; struct drm_mm_node *node, *next; @@ -1133,14 +1134,14 @@ out: drm_mm_takedown(&mm); } -static void igt_mm_align32(struct kunit *test) +static void drm_test_mm_align32(struct kunit *test) { - igt_align_pot(test, 32); + drm_test_mm_align_pot(test, 32); } -static void igt_mm_align64(struct kunit *test) +static void drm_test_mm_align64(struct kunit *test) { - igt_align_pot(test, 64); + drm_test_mm_align_pot(test, 64); } static void show_scan(struct kunit *test, const struct drm_mm_scan *scan) @@ -1386,7 +1387,7 @@ static int evict_something(struct kunit *test, struct drm_mm *mm, return 0; } -static void igt_mm_evict(struct kunit *test) +static void drm_test_mm_evict(struct kunit *test) { DRM_RND_STATE(prng, random_seed); const unsigned int size = 8192; @@ -1477,7 +1478,7 @@ err_nodes: vfree(nodes); } -static void igt_mm_evict_range(struct kunit *test) +static void drm_test_mm_evict_range(struct kunit *test) { DRM_RND_STATE(prng, random_seed); const unsigned int size = 8192; @@ -1490,7 +1491,7 @@ static void igt_mm_evict_range(struct kunit *test) struct drm_mm_node *node, *next; unsigned int *order, n; - /* Like igt_evict() but now we are limiting the search to a + /* Like drm_test_mm_evict() but now we are limiting the search to a * small portion of the full drm_mm. */ @@ -1564,7 +1565,7 @@ static unsigned int node_index(const struct drm_mm_node *node) return div64_u64(node->start, node->size); } -static void igt_mm_topdown(struct kunit *test) +static void drm_test_mm_topdown(struct kunit *test) { const struct insert_mode *topdown = &insert_modes[TOPDOWN]; @@ -1671,7 +1672,7 @@ err_nodes: vfree(nodes); } -static void igt_mm_bottomup(struct kunit *test) +static void drm_test_mm_bottomup(struct kunit *test) { const struct insert_mode *bottomup = &insert_modes[BOTTOMUP]; @@ -1683,7 +1684,7 @@ static void igt_mm_bottomup(struct kunit *test) struct drm_mm_node *nodes, *node, *next; unsigned int *order, n, m, o = 0; - /* Like igt_topdown, but instead of searching for the last hole, + /* Like drm_test_mm_topdown, but instead of searching for the last hole, * we search for the first. 
*/ @@ -1763,7 +1764,7 @@ err_nodes: vfree(nodes); } -static void __igt_once(struct kunit *test, unsigned int mode) +static void drm_test_mm_once(struct kunit *test, unsigned int mode) { struct drm_mm mm; struct drm_mm_node rsvd_lo, rsvd_hi, node; @@ -1806,14 +1807,14 @@ err: drm_mm_takedown(&mm); } -static void igt_mm_lowest(struct kunit *test) +static void drm_test_mm_lowest(struct kunit *test) { - __igt_once(test, DRM_MM_INSERT_LOW); + drm_test_mm_once(test, DRM_MM_INSERT_LOW); } -static void igt_mm_highest(struct kunit *test) +static void drm_test_mm_highest(struct kunit *test) { - __igt_once(test, DRM_MM_INSERT_HIGH); + drm_test_mm_once(test, DRM_MM_INSERT_HIGH); } static void separate_adjacent_colors(const struct drm_mm_node *node, @@ -1842,7 +1843,7 @@ static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node) return false; } -static void igt_mm_color(struct kunit *test) +static void drm_test_mm_color(struct kunit *test) { const unsigned int count = min(4096u, max_iterations); const struct insert_mode *mode; @@ -2041,7 +2042,7 @@ static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start, return 0; } -static void igt_mm_color_evict(struct kunit *test) +static void drm_test_mm_color_evict(struct kunit *test) { DRM_RND_STATE(prng, random_seed); const unsigned int total_size = min(8192u, max_iterations); @@ -2122,7 +2123,7 @@ err_nodes: vfree(nodes); } -static void igt_mm_color_evict_range(struct kunit *test) +static void drm_test_mm_color_evict_range(struct kunit *test) { DRM_RND_STATE(prng, random_seed); const unsigned int total_size = 8192; @@ -2136,7 +2137,7 @@ static void igt_mm_color_evict_range(struct kunit *test) struct drm_mm_node *node, *next; unsigned int *order, n; - /* Like igt_color_evict(), but limited to small portion of the full + /* Like drm_test_mm_color_evict(), but limited to small portion of the full * drm_mm range. 
*/ @@ -2221,25 +2222,25 @@ module_param(max_iterations, uint, 0400); module_param(max_prime, uint, 0400); static struct kunit_case drm_mm_tests[] = { - KUNIT_CASE(igt_mm_init), - KUNIT_CASE(igt_mm_debug), - KUNIT_CASE(igt_mm_reserve), - KUNIT_CASE(igt_mm_insert), - KUNIT_CASE(igt_mm_replace), - KUNIT_CASE(igt_mm_insert_range), - KUNIT_CASE(igt_mm_frag), - KUNIT_CASE(igt_mm_align), - KUNIT_CASE(igt_mm_align32), - KUNIT_CASE(igt_mm_align64), - KUNIT_CASE(igt_mm_evict), - KUNIT_CASE(igt_mm_evict_range), - KUNIT_CASE(igt_mm_topdown), - KUNIT_CASE(igt_mm_bottomup), - KUNIT_CASE(igt_mm_lowest), - KUNIT_CASE(igt_mm_highest), - KUNIT_CASE(igt_mm_color), - KUNIT_CASE(igt_mm_color_evict), - KUNIT_CASE(igt_mm_color_evict_range), + KUNIT_CASE(drm_test_mm_init), + KUNIT_CASE(drm_test_mm_debug), + KUNIT_CASE(drm_test_mm_reserve), + KUNIT_CASE(drm_test_mm_insert), + KUNIT_CASE(drm_test_mm_replace), + KUNIT_CASE(drm_test_mm_insert_range), + KUNIT_CASE(drm_test_mm_frag), + KUNIT_CASE(drm_test_mm_align), + KUNIT_CASE(drm_test_mm_align32), + KUNIT_CASE(drm_test_mm_align64), + KUNIT_CASE(drm_test_mm_evict), + KUNIT_CASE(drm_test_mm_evict_range), + KUNIT_CASE(drm_test_mm_topdown), + KUNIT_CASE(drm_test_mm_bottomup), + KUNIT_CASE(drm_test_mm_lowest), + KUNIT_CASE(drm_test_mm_highest), + KUNIT_CASE(drm_test_mm_color), + KUNIT_CASE(drm_test_mm_color_evict), + KUNIT_CASE(drm_test_mm_color_evict_range), {} }; diff --git a/drivers/gpu/drm/tests/drm_plane_helper_test.c b/drivers/gpu/drm/tests/drm_plane_helper_test.c index be6cff0020ed..ec71af791f1f 100644 --- a/drivers/gpu/drm/tests/drm_plane_helper_test.c +++ b/drivers/gpu/drm/tests/drm_plane_helper_test.c @@ -73,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state, return true; } -static void igt_check_plane_state(struct kunit *test) +static void drm_test_check_plane_state(struct kunit *test) { int ret; @@ -223,7 +223,7 @@ static void igt_check_plane_state(struct kunit *test) } static struct kunit_case drm_plane_helper_test[] = { - KUNIT_CASE(igt_check_plane_state), + KUNIT_CASE(drm_test_check_plane_state), {} }; diff --git a/drivers/gpu/drm/tests/drm_rect_test.c b/drivers/gpu/drm/tests/drm_rect_test.c index c1dbefd49a4c..e9809ea32696 100644 --- a/drivers/gpu/drm/tests/drm_rect_test.c +++ b/drivers/gpu/drm/tests/drm_rect_test.c @@ -9,7 +9,7 @@ #include <drm/drm_rect.h> -static void igt_drm_rect_clip_scaled_div_by_zero(struct kunit *test) +static void drm_test_rect_clip_scaled_div_by_zero(struct kunit *test) { struct drm_rect src, dst, clip; bool visible; @@ -35,7 +35,7 @@ static void igt_drm_rect_clip_scaled_div_by_zero(struct kunit *test) KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n"); } -static void igt_drm_rect_clip_scaled_not_clipped(struct kunit *test) +static void drm_test_rect_clip_scaled_not_clipped(struct kunit *test) { struct drm_rect src, dst, clip; bool visible; @@ -83,7 +83,7 @@ static void igt_drm_rect_clip_scaled_not_clipped(struct kunit *test) KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n"); } -static void igt_drm_rect_clip_scaled_clipped(struct kunit *test) +static void drm_test_rect_clip_scaled_clipped(struct kunit *test) { struct drm_rect src, dst, clip; bool visible; @@ -173,7 +173,7 @@ static void igt_drm_rect_clip_scaled_clipped(struct kunit *test) KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n"); } -static void igt_drm_rect_clip_scaled_signed_vs_unsigned(struct kunit *test) +static void 
drm_test_rect_clip_scaled_signed_vs_unsigned(struct kunit *test) { struct drm_rect src, dst, clip; bool visible; @@ -197,10 +197,10 @@ static void igt_drm_rect_clip_scaled_signed_vs_unsigned(struct kunit *test) } static struct kunit_case drm_rect_tests[] = { - KUNIT_CASE(igt_drm_rect_clip_scaled_div_by_zero), - KUNIT_CASE(igt_drm_rect_clip_scaled_not_clipped), - KUNIT_CASE(igt_drm_rect_clip_scaled_clipped), - KUNIT_CASE(igt_drm_rect_clip_scaled_signed_vs_unsigned), + KUNIT_CASE(drm_test_rect_clip_scaled_div_by_zero), + KUNIT_CASE(drm_test_rect_clip_scaled_not_clipped), + KUNIT_CASE(drm_test_rect_clip_scaled_clipped), + KUNIT_CASE(drm_test_rect_clip_scaled_signed_vs_unsigned), { } }; diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index b61db8f279e9..afb2879980c6 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -70,7 +70,7 @@ static int tidss_atomic_check(struct drm_device *ddev, * changes. This is needed for updating the plane positions in * tidss_crtc_position_planes() which is called from * crtc_atomic_enable() and crtc_atomic_flush(). We have an - * extra flag to to mark x,y-position changes and together + * extra flag to mark x,y-position changes and together * with zpos_changed the condition recognizes all the above * cases. */ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index 0ccf791301cb..cf77a8ce7398 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -105,11 +105,10 @@ int tilcdc_plane_init(struct drm_device *dev, struct tilcdc_drm_private *priv = dev->dev_private; int ret; - ret = drm_plane_init(dev, plane, 1, - &tilcdc_plane_funcs, - priv->pixelformats, - priv->num_pixelformats, - true); + ret = drm_universal_plane_init(dev, plane, 1, &tilcdc_plane_funcs, + priv->pixelformats, + priv->num_pixelformats, + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { dev_err(dev->dev, "Failed to initialize plane: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 57f229a785bf..48c24aa8c28a 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -181,6 +181,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index 86439e50e304..9a1a5943bee0 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -100,6 +100,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index b8826a0b086b..69b265e78096 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -137,6 +137,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = yx240qv29_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index a5b433a8e0d8..c80028bb1d11 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -150,6 +150,7 @@ static void 
waveshare_enable(struct drm_simple_display_pipe *pipe, } static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = waveshare_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index 27f1bd4da2f4..bc522fb3d94d 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -141,6 +141,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = mi0283qt_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index a76fefa8adbc..955a61d628e7 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -212,6 +212,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = panel_mipi_dbi_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index c4c1be3ac0b8..e62f4d16b2c6 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -621,6 +621,15 @@ static void power_off(struct repaper_epd *epd) gpiod_set_value_cansleep(epd->discharge, 0); } +static enum drm_mode_status repaper_pipe_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) +{ + struct drm_crtc *crtc = &pipe->crtc; + struct repaper_epd *epd = drm_to_epd(crtc->dev); + + return drm_crtc_helper_mode_valid_fixed(crtc, mode, epd->mode); +} + static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) @@ -831,6 +840,7 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe, } static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { + .mode_valid = repaper_pipe_mode_valid, .enable = repaper_pipe_enable, .disable = repaper_pipe_disable, .update = repaper_pipe_update, @@ -839,22 +849,8 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = { static int repaper_connector_get_modes(struct drm_connector *connector) { struct repaper_epd *epd = drm_to_epd(connector->dev); - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, epd->mode); - if (!mode) { - DRM_ERROR("Failed to duplicate mode\n"); - return 0; - } - - drm_mode_set_name(mode); - mode->type |= DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - return 1; + return drm_connector_helper_get_modes_fixed(connector, epd->mode); } static const struct drm_connector_helper_funcs repaper_connector_hfuncs = { diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index a81f91814595..18489779fb8a 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -31,16 +31,6 @@ #define DRIVER_MINOR 0 /* - * Assume a monitor resolution of 96 dpi to - * get a somewhat reasonable screen size. 
- */ -#define RES_MM(d) \ - (((d) * 254ul) / (96ul * 10ul)) - -#define SIMPLEDRM_MODE(hd, vd) \ - DRM_SIMPLE_MODE(hd, vd, RES_MM(hd), RES_MM(vd)) - -/* * Helpers for simplefb */ @@ -479,64 +469,46 @@ static const uint64_t simpledrm_primary_plane_format_modifiers[] = { DRM_FORMAT_MOD_INVALID }; -static int simpledrm_primary_plane_helper_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *new_state) -{ - struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane); - struct drm_crtc *new_crtc = new_plane_state->crtc; - struct drm_crtc_state *new_crtc_state = NULL; - int ret; - - if (new_crtc) - new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_crtc); - - ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - false, false); - if (ret) - return ret; - else if (!new_plane_state->visible) - return 0; - - return 0; -} - static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *old_state) + struct drm_atomic_state *state) { - struct drm_plane_state *plane_state = plane->state; - struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane); + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; struct drm_device *dev = plane->dev; struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); - struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base); - struct drm_rect src_clip, dst_clip; - int idx; + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + int ret, idx; - if (!fb) + ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); + if (ret) return; - if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip)) - return; + if (!drm_dev_enter(dev, &idx)) + goto out_drm_gem_fb_end_cpu_access; - dst_clip = plane_state->dst; - if (!drm_rect_intersect(&dst_clip, &src_clip)) - return; + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base); + struct drm_rect dst_clip = plane_state->dst; - if (!drm_dev_enter(dev, &idx)) - return; + if (!drm_rect_intersect(&dst_clip, &damage)) + continue; - iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip)); - drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb, - &src_clip); + iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip)); + drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb, + &damage); + } drm_dev_exit(idx); +out_drm_gem_fb_end_cpu_access: + drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); } static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane, - struct drm_atomic_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = plane->dev; struct simpledrm_device *sdev = simpledrm_device_of_dev(dev); @@ -553,7 +525,7 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, - .atomic_check = 
simpledrm_primary_plane_helper_atomic_check, + .atomic_check = drm_plane_helper_atomic_check, .atomic_update = simpledrm_primary_plane_helper_atomic_update, .atomic_disable = simpledrm_primary_plane_helper_atomic_disable, }; @@ -570,15 +542,7 @@ static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *cr { struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev); - if (mode->hdisplay != sdev->mode.hdisplay && - mode->vdisplay != sdev->mode.vdisplay) - return MODE_ONE_SIZE; - else if (mode->hdisplay != sdev->mode.hdisplay) - return MODE_ONE_WIDTH; - else if (mode->vdisplay != sdev->mode.vdisplay) - return MODE_ONE_HEIGHT; - - return MODE_OK; + return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode); } static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc, @@ -620,24 +584,8 @@ static const struct drm_encoder_funcs simpledrm_encoder_funcs = { static int simpledrm_connector_helper_get_modes(struct drm_connector *connector) { struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev); - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &sdev->mode); - if (!mode) - return 0; - - if (mode->name[0] == '\0') - drm_mode_set_name(mode); - - mode->type |= DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - if (mode->width_mm) - connector->display_info.width_mm = mode->width_mm; - if (mode->height_mm) - connector->display_info.height_mm = mode->height_mm; - - return 1; + return drm_connector_helper_get_modes_fixed(connector, &sdev->mode); } static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = { @@ -665,51 +613,17 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = { static struct drm_display_mode simpledrm_mode(unsigned int width, unsigned int height) { - struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) }; - - mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */; - drm_mode_set_name(&mode); - - return mode; -} - -static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev, - size_t *nformats_out) -{ - struct drm_device *dev = &sdev->dev; - size_t i; - - if (sdev->nformats) - goto out; /* don't rebuild list on recurring calls */ - - /* native format goes first */ - sdev->formats[0] = sdev->format->format; - sdev->nformats = 1; - - /* default formats go second */ - for (i = 0; i < ARRAY_SIZE(simpledrm_primary_plane_formats); ++i) { - if (simpledrm_primary_plane_formats[i] == sdev->format->format) - continue; /* native format already went first */ - sdev->formats[sdev->nformats] = simpledrm_primary_plane_formats[i]; - sdev->nformats++; - } - /* - * TODO: The simpledrm driver converts framebuffers to the native - * format when copying them to device memory. If there are more - * formats listed than supported by the driver, the native format - * is not supported by the conversion helpers. Therefore *only* - * support the native format and add a conversion helper ASAP. + * Assume a monitor resolution of 96 dpi to + * get a somewhat reasonable screen size. 
*/ - if (drm_WARN_ONCE(dev, i != sdev->nformats, - "format conversion helpers required for %p4cc", - &sdev->format->format)) { - sdev->nformats = 1; - } + const struct drm_display_mode mode = { + DRM_MODE_INIT(60, width, height, + DRM_MODE_RES_MM(width, 96ul), + DRM_MODE_RES_MM(height, 96ul)) + }; -out: - *nformats_out = sdev->nformats; - return sdev->formats; + return mode; } static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, @@ -728,7 +642,6 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, struct drm_encoder *encoder; struct drm_connector *connector; unsigned long max_width, max_height; - const uint32_t *formats; size_t nformats; int ret; @@ -779,8 +692,11 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, drm_err(dev, "no simplefb configuration found\n"); return ERR_PTR(-ENODEV); } - if (!stride) - stride = DIV_ROUND_UP(drm_format_info_bpp(format, 0) * width, 8); + if (!stride) { + stride = drm_format_info_min_pitch(format, 0, width); + if (drm_WARN_ON(dev, !stride)) + return ERR_PTR(-EINVAL); + } sdev->mode = simpledrm_mode(width, height); sdev->format = format; @@ -840,11 +756,14 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv, /* Primary plane */ - formats = simpledrm_device_formats(sdev, &nformats); + nformats = drm_fb_build_fourcc_list(dev, &format->format, 1, + simpledrm_primary_plane_formats, + ARRAY_SIZE(simpledrm_primary_plane_formats), + sdev->formats, ARRAY_SIZE(sdev->formats)); primary_plane = &sdev->primary_plane; ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs, - formats, nformats, + sdev->formats, nformats, simpledrm_primary_plane_format_modifiers, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index d2042a0f02dd..c36ba08acda1 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -133,6 +133,7 @@ out_exit: } static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = { + .mode_valid = mipi_dbi_pipe_mode_valid, .enable = st7735r_pipe_enable, .disable = mipi_dbi_pipe_disable, .update = mipi_dbi_pipe_update, diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 1530982338e9..497ee1fdbad7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -402,6 +402,8 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map) struct ttm_resource *mem = bo->resource; int ret; + dma_resv_assert_held(bo->base.resv); + ret = ttm_mem_io_reserve(bo->bdev, mem); if (ret) return ret; @@ -460,6 +462,8 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map) { struct ttm_resource *mem = bo->resource; + dma_resv_assert_held(bo->base.resv); + if (iosys_map_is_null(map)) return; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 5703277c6f52..91effdcefb6d 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -21,8 +21,14 @@ static int udl_usb_suspend(struct usb_interface *interface, pm_message_t message) { struct drm_device *dev = usb_get_intfdata(interface); + int ret; - return drm_mode_config_helper_suspend(dev); + ret = drm_mode_config_helper_suspend(dev); + if (ret) + return ret; + + udl_sync_pending_urbs(dev); + return 0; } static int udl_usb_resume(struct usb_interface *interface) @@ -32,6 +38,16 @@ static int udl_usb_resume(struct usb_interface *interface) return 
drm_mode_config_helper_resume(dev); } +static int udl_usb_reset_resume(struct usb_interface *interface) +{ + struct drm_device *dev = usb_get_intfdata(interface); + struct udl_device *udl = to_udl(dev); + + udl_select_std_channel(udl); + + return drm_mode_config_helper_resume(dev); +} + /* * FIXME: Dma-buf sharing requires DMA support by the importing device. * This function is a workaround to make USB devices work as well. @@ -140,6 +156,7 @@ static struct usb_driver udl_driver = { .disconnect = udl_usb_disconnect, .suspend = udl_usb_suspend, .resume = udl_usb_resume, + .reset_resume = udl_usb_reset_resume, .id_table = id_table, }; module_usb_driver(udl_driver); diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 28aaf75d71cf..b4cc7cc568c7 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -39,7 +39,6 @@ struct urb_node { struct urb_list { struct list_head list; - struct list_head in_flight; spinlock_t lock; wait_queue_head_t sleep; int available; @@ -75,17 +74,10 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl) int udl_modeset_init(struct drm_device *dev); struct drm_connector *udl_connector_init(struct drm_device *dev); -struct urb *udl_get_urb_timeout(struct drm_device *dev, long timeout); - -#define GET_URB_TIMEOUT HZ -static inline struct urb *udl_get_urb(struct drm_device *dev) -{ - return udl_get_urb_timeout(dev, GET_URB_TIMEOUT); -} +struct urb *udl_get_urb(struct drm_device *dev); int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); -int udl_sync_pending_urbs(struct drm_device *dev); -void udl_kill_pending_urbs(struct drm_device *dev); +void udl_sync_pending_urbs(struct drm_device *dev); void udl_urb_completion(struct urb *urb); int udl_init(struct udl_device *udl); @@ -95,6 +87,7 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, u32 byte_offset, u32 device_byte_offset, u32 byte_width); int udl_drop_usb(struct drm_device *dev); +int udl_select_std_channel(struct udl_device *udl); #define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */ #define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. 
*/ diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index fdafbf8f3c3c..061cb88c08a2 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -20,9 +20,11 @@ #define NR_USB_REQUEST_CHANNEL 0x12 #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE) -#define WRITES_IN_FLIGHT (4) +#define WRITES_IN_FLIGHT (20) #define MAX_VENDOR_DESCRIPTOR_SIZE 256 +static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout); + static int udl_parse_vendor_descriptor(struct udl_device *udl) { struct usb_device *udev = udl_to_usb_device(udl); @@ -92,7 +94,7 @@ success: /* * Need to ensure a channel is selected before submitting URBs */ -static int udl_select_std_channel(struct udl_device *udl) +int udl_select_std_channel(struct udl_device *udl) { static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, @@ -126,6 +128,7 @@ void udl_urb_completion(struct urb *urb) if (urb->status) { if (!(urb->status == -ENOENT || urb->status == -ECONNRESET || + urb->status == -EPROTO || urb->status == -ESHUTDOWN)) { DRM_ERROR("%s - nonzero write bulk status received: %d\n", __func__, urb->status); @@ -135,7 +138,7 @@ void udl_urb_completion(struct urb *urb) urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */ spin_lock_irqsave(&udl->urbs.lock, flags); - list_move(&unode->entry, &udl->urbs.list); + list_add_tail(&unode->entry, &udl->urbs.list); udl->urbs.available++; spin_unlock_irqrestore(&udl->urbs.lock, flags); @@ -145,15 +148,17 @@ void udl_urb_completion(struct urb *urb) static void udl_free_urb_list(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); - int count = udl->urbs.count; struct urb_node *unode; struct urb *urb; DRM_DEBUG("Waiting for completes and freeing all render urbs\n"); /* keep waiting and freeing, until we've got 'em all */ - while (count--) { - urb = udl_get_urb_timeout(dev, MAX_SCHEDULE_TIMEOUT); + while (udl->urbs.count) { + spin_lock_irq(&udl->urbs.lock); + urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT); + udl->urbs.count--; + spin_unlock_irq(&udl->urbs.lock); if (WARN_ON(!urb)) break; unode = urb->context; @@ -163,7 +168,8 @@ static void udl_free_urb_list(struct drm_device *dev) usb_free_urb(urb); kfree(unode); } - udl->urbs.count = 0; + + wake_up_all(&udl->urbs.sleep); } static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) @@ -176,16 +182,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) struct usb_device *udev = udl_to_usb_device(udl); spin_lock_init(&udl->urbs.lock); - -retry: - udl->urbs.size = size; INIT_LIST_HEAD(&udl->urbs.list); - INIT_LIST_HEAD(&udl->urbs.in_flight); - init_waitqueue_head(&udl->urbs.sleep); udl->urbs.count = 0; udl->urbs.available = 0; +retry: + udl->urbs.size = size; + while (udl->urbs.count * size < wanted_size) { unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); if (!unode) @@ -228,43 +232,56 @@ retry: return udl->urbs.count; } -struct urb *udl_get_urb_timeout(struct drm_device *dev, long timeout) +static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout) { - struct udl_device *udl = to_udl(dev); - struct urb_node *unode = NULL; + struct urb_node *unode; - if (!udl->urbs.count) - return NULL; + assert_spin_locked(&udl->urbs.lock); /* Wait for an in-flight buffer to complete and get re-queued */ - spin_lock_irq(&udl->urbs.lock); if (!wait_event_lock_irq_timeout(udl->urbs.sleep, + !udl->urbs.count || !list_empty(&udl->urbs.list), udl->urbs.lock, timeout)) { DRM_INFO("wait for 
urb interrupted: available: %d\n", udl->urbs.available); - goto unlock; + return NULL; } + if (!udl->urbs.count) + return NULL; + unode = list_first_entry(&udl->urbs.list, struct urb_node, entry); - list_move(&unode->entry, &udl->urbs.in_flight); + list_del_init(&unode->entry); udl->urbs.available--; -unlock: - spin_unlock_irq(&udl->urbs.lock); return unode ? unode->urb : NULL; } +#define GET_URB_TIMEOUT HZ +struct urb *udl_get_urb(struct drm_device *dev) +{ + struct udl_device *udl = to_udl(dev); + struct urb *urb; + + spin_lock_irq(&udl->urbs.lock); + urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT); + spin_unlock_irq(&udl->urbs.lock); + return urb; +} + int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) { struct udl_device *udl = to_udl(dev); int ret; - if (WARN_ON(len > udl->urbs.size)) - return -EINVAL; - + if (WARN_ON(len > udl->urbs.size)) { + ret = -EINVAL; + goto error; + } urb->transfer_buffer_length = len; /* set to actual payload len */ ret = usb_submit_urb(urb, GFP_ATOMIC); + error: if (ret) { udl_urb_completion(urb); /* because no one else will */ DRM_ERROR("usb_submit_urb error %x\n", ret); @@ -273,36 +290,17 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) } /* wait until all pending URBs have been processed */ -int udl_sync_pending_urbs(struct drm_device *dev) +void udl_sync_pending_urbs(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); - int ret = 0; spin_lock_irq(&udl->urbs.lock); /* 2 seconds as a sane timeout */ if (!wait_event_lock_irq_timeout(udl->urbs.sleep, - list_empty(&udl->urbs.in_flight), + udl->urbs.available == udl->urbs.count, udl->urbs.lock, msecs_to_jiffies(2000))) - ret = -ETIMEDOUT; - spin_unlock_irq(&udl->urbs.lock); - return ret; -} - -/* kill pending URBs */ -void udl_kill_pending_urbs(struct drm_device *dev) -{ - struct udl_device *udl = to_udl(dev); - struct urb_node *unode; - - spin_lock_irq(&udl->urbs.lock); - while (!list_empty(&udl->urbs.in_flight)) { - unode = list_first_entry(&udl->urbs.in_flight, - struct urb_node, entry); - spin_unlock_irq(&udl->urbs.lock); - usb_kill_urb(unode->urb); - spin_lock_irq(&udl->urbs.lock); - } + drm_err(dev, "Timeout for syncing pending URBs\n"); spin_unlock_irq(&udl->urbs.lock); } @@ -354,7 +352,6 @@ int udl_drop_usb(struct drm_device *dev) { struct udl_device *udl = to_udl(dev); - udl_kill_pending_urbs(dev); udl_free_urb_list(dev); put_device(udl->dmadev); udl->dmadev = NULL; diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index 169110d8fc2e..ec6876f449f3 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -242,38 +242,15 @@ static long udl_log_cpp(unsigned int cpp) return __ffs(cpp); } -static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y, - int width, int height) -{ - int x1, x2; - - if (WARN_ON_ONCE(x < 0) || - WARN_ON_ONCE(y < 0) || - WARN_ON_ONCE(width < 0) || - WARN_ON_ONCE(height < 0)) - return -EINVAL; - - x1 = ALIGN_DOWN(x, sizeof(unsigned long)); - x2 = ALIGN(width + (x - x1), sizeof(unsigned long)) + x1; - - clip->x1 = x1; - clip->y1 = y; - clip->x2 = x2; - clip->y2 = y + height; - - return 0; -} - static int udl_handle_damage(struct drm_framebuffer *fb, const struct iosys_map *map, - int x, int y, int width, int height) + const struct drm_rect *clip) { struct drm_device *dev = fb->dev; void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */ int i, ret; char *cmd; struct urb *urb; - struct drm_rect clip; int log_bpp; ret = 
udl_log_cpp(fb->format->cpp[0]); @@ -281,12 +258,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb, return ret; log_bpp = ret; - ret = udl_aligned_damage_clip(&clip, x, y, width, height); - if (ret) - return ret; - else if ((clip.x2 > fb->width) || (clip.y2 > fb->height)) - return -EINVAL; - ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return ret; @@ -298,11 +269,11 @@ static int udl_handle_damage(struct drm_framebuffer *fb, } cmd = urb->transfer_buffer; - for (i = clip.y1; i < clip.y2; i++) { + for (i = clip->y1; i < clip->y2; i++) { const int line_offset = fb->pitches[0] * i; - const int byte_offset = line_offset + (clip.x1 << log_bpp); - const int dev_byte_offset = (fb->width * i + clip.x1) << log_bpp; - const int byte_width = (clip.x2 - clip.x1) << log_bpp; + const int byte_offset = line_offset + (clip->x1 << log_bpp); + const int dev_byte_offset = (fb->width * i + clip->x1) << log_bpp; + const int byte_width = drm_rect_width(clip) << log_bpp; ret = udl_render_hline(dev, log_bpp, &urb, (char *)vaddr, &cmd, byte_offset, dev_byte_offset, byte_width); @@ -355,6 +326,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct udl_device *udl = to_udl(dev); struct drm_display_mode *mode = &crtc_state->mode; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height); char *buf; char *wrptr; int color_depth = UDL_COLOR_DEPTH_16BPP; @@ -380,10 +352,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, udl->mode_buf_len = wrptr - buf; - udl_handle_damage(fb, &shadow_plane_state->data[0], 0, 0, fb->width, fb->height); - - if (!crtc_state->mode_changed) - return; + udl_handle_damage(fb, &shadow_plane_state->data[0], &clip); /* enable display */ udl_crtc_write_mode_to_hw(crtc); @@ -397,8 +366,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe) struct urb *urb; char *buf; - udl_kill_pending_urbs(dev); - urb = udl_get_urb(dev); if (!urb) return; @@ -410,8 +377,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe) buf = udl_dummy_render(buf); udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer); - - udl_sync_pending_urbs(dev); } static void @@ -427,8 +392,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, return; if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect)) - udl_handle_damage(fb, &shadow_plane_state->data[0], rect.x1, rect.y1, - rect.x2 - rect.x1, rect.y2 - rect.y1); + udl_handle_damage(fb, &shadow_plane_state->data[0], &rect); } static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = { @@ -483,6 +447,7 @@ int udl_modeset_init(struct drm_device *dev) format_count, NULL, connector); if (ret) return ret; + drm_plane_enable_fb_damage_clips(&udl->display_pipe.plane); drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 176ef2a6a731..b57844632dbd 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -25,46 +25,6 @@ #define MIN_RAW_PIX_BYTES 2 #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES) -/* - * Trims identical data from front and back of line - * Sets new front buffer address and width - * And returns byte count of identical pixels - * Assumes CPU natural alignment (unsigned long) - * for back and front buffer ptrs and width - */ -#if 0 -static int udl_trim_hline(const u8 *bback, const u8 
**bfront, int *width_bytes) -{ - int j, k; - const unsigned long *back = (const unsigned long *) bback; - const unsigned long *front = (const unsigned long *) *bfront; - const int width = *width_bytes / sizeof(unsigned long); - int identical = width; - int start = width; - int end = width; - - for (j = 0; j < width; j++) { - if (back[j] != front[j]) { - start = j; - break; - } - } - - for (k = width - 1; k > j; k--) { - if (back[k] != front[k]) { - end = k+1; - break; - } - } - - identical = start + (width - end); - *bfront = (u8 *) &front[start]; - *width_bytes = (end - start) * sizeof(unsigned long); - - return identical * sizeof(unsigned long); -} -#endif - static inline u16 pixel32_to_be16(const uint32_t pixel) { return (((pixel >> 3) & 0x001f) | @@ -220,8 +180,11 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr, u8 *cmd = *urb_buf_ptr; u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; - if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) + if (WARN_ON(!(log_bpp == 1 || log_bpp == 2))) { + /* need to finish URB at error from this function */ + udl_urb_completion(urb); return -EINVAL; + } line_start = (u8 *) (front + byte_offset); next_pixel = line_start; diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h index a5de40fe1a76..f60d82504da0 100644 --- a/drivers/gpu/drm/vboxvideo/vboxvideo.h +++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h @@ -43,7 +43,7 @@ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information * from the host and issue commands to the host. * - * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the the + * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, the * following operations with the VBE data register can be performed: * * Operation Result diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 25299fbc083b..64f9feabf43e 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -124,6 +124,23 @@ static unsigned long long vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode, unsigned int bpc, enum vc4_hdmi_output_format fmt); +static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder) +{ + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_info *display = &vc4_hdmi->connector.display_info; + + lockdep_assert_held(&vc4_hdmi->mutex); + + if (!display->is_hdmi) + return false; + + if (!display->hdmi.scdc.supported || + !display->hdmi.scdc.scrambling.supported) + return false; + + return true; +} + static bool vc4_hdmi_mode_needs_scrambling(const struct drm_display_mode *mode, unsigned int bpc, enum vc4_hdmi_output_format fmt) @@ -271,47 +288,172 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {} #endif -static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder); +static int reset_pipe(struct drm_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_crtc_state *crtc_state; + int ret; + + state = drm_atomic_state_alloc(crtc->dev); + if (!state) + return -ENOMEM; -static enum drm_connector_status -vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) + state->acquire_ctx = ctx; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out; + } + + crtc_state->connectors_changed = true; + + ret = 
drm_atomic_commit(state); +out: + drm_atomic_state_put(state); + + return ret; +} + +static int vc4_hdmi_reset_link(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx) { + struct drm_device *drm = connector->dev; struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); - bool connected = false; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + bool scrambling_needed; + u8 config; + int ret; - mutex_lock(&vc4_hdmi->mutex); + if (!connector) + return 0; + + ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx); + if (ret) + return ret; + + conn_state = connector->state; + crtc = conn_state->crtc; + if (!crtc) + return 0; + + ret = drm_modeset_lock(&crtc->mutex, ctx); + if (ret) + return ret; + + crtc_state = crtc->state; + if (!crtc_state->active) + return 0; + + if (!vc4_hdmi_supports_scrambling(encoder)) + return 0; + + scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode, + vc4_hdmi->output_bpc, + vc4_hdmi->output_format); + if (!scrambling_needed) + return 0; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + return 0; + + ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG, &config); + if (ret < 0) { + drm_err(drm, "Failed to read TMDS config: %d\n", ret); + return 0; + } + + if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) + return 0; + + /* + * HDMI 2.0 says that one should not send scrambled data + * prior to configuring the sink scrambling, and that + * TMDS clock/data transmission should be suspended when + * changing the TMDS clock rate in the sink. So let's + * just do a full modeset here, even though some sinks + * would be perfectly happy if we were to just reconfigure + * the SCDC settings on the fly. + */ + return reset_pipe(crtc, ctx); +} + +static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, + struct drm_modeset_acquire_ctx *ctx, + enum drm_connector_status status) +{ + struct drm_connector *connector = &vc4_hdmi->connector; + struct edid *edid; + + /* + * NOTE: This function should really be called with + * vc4_hdmi->mutex held, but doing so results in reentrancy + * issues since cec_s_phys_addr_from_edid might call + * .adap_enable, which leads to that function being called with + * our mutex held. + * + * A similar situation occurs with + * drm_atomic_helper_connector_hdmi_reset_link() that will call + * into our KMS hooks if the scrambling was enabled. + * + * Concurrency isn't an issue at the moment since we don't share + * any state with any of the other frameworks so we can ignore + * the lock for now. + */ + + if (status == connector_status_disconnected) { + cec_phys_addr_invalidate(vc4_hdmi->cec_adap); + return; + } + + edid = drm_get_edid(connector, vc4_hdmi->ddc); + if (!edid) + return; + + cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); + kfree(edid); + + vc4_hdmi_reset_link(connector, ctx); +} + +static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); + enum drm_connector_status status = connector_status_disconnected; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but + * doing so results in reentrancy issues since + * vc4_hdmi_handle_hotplug() can call into other functions that + * would take the mutex while it's held here.
+ * + * Concurrency isn't an issue at the moment since we don't share + * any state with any of the other frameworks so we can ignore + * the lock for now. + */ WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); if (vc4_hdmi->hpd_gpio) { if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) - connected = true; + status = connector_status_connected; } else { if (vc4_hdmi->variant->hp_detect && vc4_hdmi->variant->hp_detect(vc4_hdmi)) - connected = true; - } - - if (connected) { - if (connector->status != connector_status_connected) { - struct edid *edid = drm_get_edid(connector, vc4_hdmi->ddc); - - if (edid) { - cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); - kfree(edid); - } - } - - vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base); - pm_runtime_put(&vc4_hdmi->pdev->dev); - mutex_unlock(&vc4_hdmi->mutex); - return connector_status_connected; + status = connector_status_connected; } - cec_phys_addr_invalidate(vc4_hdmi->cec_adap); + vc4_hdmi_handle_hotplug(vc4_hdmi, ctx, status); pm_runtime_put(&vc4_hdmi->pdev->dev); - mutex_unlock(&vc4_hdmi->mutex); - return connector_status_disconnected; + + return status; } static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) @@ -320,14 +462,21 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) int ret = 0; struct edid *edid; - mutex_lock(&vc4_hdmi->mutex); + /* + * NOTE: This function should really take vc4_hdmi->mutex, but + * doing so results in reentrancy issues since + * cec_s_phys_addr_from_edid might call .adap_enable, which + * leads to that function being called with our mutex held. + * + * Concurrency isn't an issue at the moment since we don't share + * any state with any of the other frameworks so we can ignore + * the lock for now. + */ edid = drm_get_edid(connector, vc4_hdmi->ddc); cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); - if (!edid) { - ret = -ENODEV; - goto out; - } + if (!edid) + return -ENODEV; drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); @@ -335,7 +484,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) if (vc4_hdmi->disable_4kp60) { struct drm_device *drm = connector->dev; - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->probed_modes, head) { if (vc4_hdmi_mode_needs_scrambling(mode, 8, VC4_HDMI_OUTPUT_RGB)) { @@ -345,9 +494,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) } } -out: - mutex_unlock(&vc4_hdmi->mutex); - return ret; } @@ -419,7 +565,6 @@ vc4_hdmi_connector_duplicate_state(struct drm_connector *connector) } static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { - .detect = vc4_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .reset = vc4_hdmi_connector_reset, .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state, @@ -427,6 +572,7 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { }; static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { + .detect_ctx = vc4_hdmi_connector_detect_ctx, .get_modes = vc4_hdmi_connector_get_modes, .atomic_check = vc4_hdmi_connector_atomic_check, }; @@ -708,37 +854,19 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) vc4_hdmi_set_hdr_infoframe(encoder); } -static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, - struct drm_display_mode *mode) -{ - struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_display_info
*display = &vc4_hdmi->connector.display_info; - - lockdep_assert_held(&vc4_hdmi->mutex); - - if (!display->is_hdmi) - return false; - - if (!display->hdmi.scdc.supported || - !display->hdmi.scdc.scrambling.supported) - return false; - - return true; -} - #define SCRAMBLING_POLLING_DELAY_MS 1000 static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_device *drm = vc4_hdmi->connector.dev; - struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; unsigned long flags; int idx; lockdep_assert_held(&vc4_hdmi->mutex); - if (!vc4_hdmi_supports_scrambling(encoder, mode)) + if (!vc4_hdmi_supports_scrambling(encoder)) return; if (!vc4_hdmi_mode_needs_scrambling(mode, @@ -1077,7 +1205,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, struct drm_connector_state *state, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_device *drm = vc4_hdmi->connector.dev; bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -1141,7 +1269,7 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, struct drm_connector_state *state, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { struct drm_device *drm = vc4_hdmi->connector.dev; const struct vc4_hdmi_connector_state *vc4_state = @@ -1303,7 +1431,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, drm_atomic_get_new_connector_state(state, connector); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; unsigned long tmds_char_rate = vc4_conn_state->tmds_char_rate; unsigned long bvb_rate, hsm_rate; unsigned long flags; @@ -1416,7 +1544,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_device *drm = vc4_hdmi->connector.dev; struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state, connector); unsigned long flags; @@ -1445,7 +1573,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_device *drm = vc4_hdmi->connector.dev; - struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; struct drm_display_info *display = &vc4_hdmi->connector.display_info; bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; @@ -2668,17 +2796,6 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap) int ret; int idx; - /* - * NOTE: This function should really take vc4_hdmi->mutex, but doing so - * results in a reentrancy since cec_s_phys_addr_from_edid() called in - * .detect or .get_modes might call .adap_enable, which leads to this - * function being called with that mutex held. 
- * - * Concurrency is not an issue for the moment since we don't share any - * state with KMS, so we can ignore the lock for now, but we need to - * keep it in mind if we were to change that assumption. - */ - if (!drm_dev_enter(drm, &idx)) /* * We can't return an error code, because the CEC @@ -2693,6 +2810,8 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap) return ret; } + mutex_lock(&vc4_hdmi->mutex); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); val = HDMI_READ(HDMI_CEC_CNTRL_5); @@ -2727,6 +2846,7 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap) spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + mutex_unlock(&vc4_hdmi->mutex); drm_dev_exit(idx); return 0; @@ -2747,16 +2867,7 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap) */ return 0; - /* - * NOTE: This function should really take vc4_hdmi->mutex, but doing so - * results in a reentrancy since cec_s_phys_addr_from_edid() called in - * .detect or .get_modes might call .adap_enable, which leads to this - * function being called with that mutex held. - * - * Concurrency is not an issue for the moment since we don't share any - * state with KMS, so we can ignore the lock for now, but we need to - * keep it in mind if we were to change that assumption. - */ + mutex_lock(&vc4_hdmi->mutex); spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); @@ -2768,6 +2879,8 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap) spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + mutex_unlock(&vc4_hdmi->mutex); + pm_runtime_put(&vc4_hdmi->pdev->dev); drm_dev_exit(idx); @@ -2790,17 +2903,6 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) unsigned long flags; int idx; - /* - * NOTE: This function should really take vc4_hdmi->mutex, but doing so - * results in a reentrancy since cec_s_phys_addr_from_edid() called in - * .detect or .get_modes might call .adap_enable, which leads to this - * function being called with that mutex held. - * - * Concurrency is not an issue for the moment since we don't share any - * state with KMS, so we can ignore the lock for now, but we need to - * keep it in mind if we were to change that assumption. - */ - if (!drm_dev_enter(drm, &idx)) /* * We can't return an error code, because the CEC @@ -2809,11 +2911,13 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) */ return 0; + mutex_lock(&vc4_hdmi->mutex); spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_CEC_CNTRL_1, (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT); spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + mutex_unlock(&vc4_hdmi->mutex); drm_dev_exit(idx); @@ -2830,17 +2934,6 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, unsigned int i; int idx; - /* - * NOTE: This function should really take vc4_hdmi->mutex, but doing so - * results in a reentrancy since cec_s_phys_addr_from_edid() called in - * .detect or .get_modes might call .adap_enable, which leads to this - * function being called with that mutex held. - * - * Concurrency is not an issue for the moment since we don't share any - * state with KMS, so we can ignore the lock for now, but we need to - * keep it in mind if we were to change that assumption. 
- */ - if (!drm_dev_enter(dev, &idx)) return -ENODEV; @@ -2850,6 +2943,8 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, return -ENOMEM; } + mutex_lock(&vc4_hdmi->mutex); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); for (i = 0; i < msg->len; i += 4) @@ -2869,7 +2964,7 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, HDMI_WRITE(HDMI_CEC_CNTRL_1, val); spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); - + mutex_unlock(&vc4_hdmi->mutex); drm_dev_exit(idx); return 0; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 99b0bc1297be..db823efb2563 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -72,7 +72,7 @@ struct vc4_hdmi_variant { /* Callback to configure the video timings in the HDMI block */ void (*set_timings)(struct vc4_hdmi *vc4_hdmi, struct drm_connector_state *state, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); /* Callback to initialize the PHY according to the connector state */ void (*phy_init)(struct vc4_hdmi *vc4_hdmi, @@ -195,15 +195,7 @@ struct vc4_hdmi { /** * @mutex: Mutex protecting the driver access across multiple - * frameworks (KMS, ALSA). - * - * NOTE: While supported, CEC has been left out since - * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a - * reentrancy issue between .get_modes (or .detect) and .adap_enable. - * Since we don't share any state between the CEC hooks and KMS', it's - * not a big deal. The only trouble might come from updating the CEC - * clock divider which might be affected by a modeset, but CEC should - * be resilient to that. + * frameworks (KMS, ALSA, CEC). */ struct mutex mutex; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 3b1701607aae..5d05093014ac 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -47,7 +47,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, struct virtio_gpu_fence_event *e = NULL; int ret; - if (!(vfpriv->ring_idx_mask & (1 << ring_idx))) + if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx))) return 0; e = kzalloc(sizeof(*e), GFP_KERNEL); diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index f4319066adcc..c3a845220e10 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -105,11 +105,12 @@ static void vkms_plane_atomic_update(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state; struct drm_framebuffer *fb = new_state->fb; struct vkms_frame_info *frame_info; - u32 fmt = fb->format->format; + u32 fmt; if (!new_state->crtc || !fb) return; + fmt = fb->format->format; vkms_plane_state = to_vkms_plane_state(new_state); shadow_plane_state = &vkms_plane_state->base; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index be19aa6e1f13..09e2d738aa87 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -877,7 +877,6 @@ static inline void vmw_user_resource_noref_release(void) /** * Buffer object helper functions - vmwgfx_bo.c */ -extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo); extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv, struct vmw_buffer_object *bo, struct ttm_placement *placement, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 2aceac7856e2..089046fa21be 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -1076,6 +1076,7 @@ int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, if (desc_len < 0) { atomic_set(&dev_priv->mksstat_user_pids[slot], 0); + __free_page(page); return -EFAULT; } diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c index 0c2507dc03d6..c6f25200982c 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c @@ -856,6 +856,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, rpf->mem.addr[1] = cfg->mem[1]; rpf->mem.addr[2] = cfg->mem[2]; + rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0; + vsp1->drm->inputs[rpf_index].crop = cfg->src; vsp1->drm->inputs[rpf_index].compose = cfg->dst; vsp1->drm->inputs[rpf_index].zpos = cfg->zpos; diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 59506ba6fc48..79305e4acce2 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -15,6 +15,7 @@ #include "mei_dev.h" #include "client.h" +#include "mkhi.h" #define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \ 0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06) @@ -89,20 +90,6 @@ struct mei_os_ver { u8 reserved2; } __packed; -#define MKHI_FEATURE_PTT 0x10 - -struct mkhi_rule_id { - __le16 rule_type; - u8 feature_id; - u8 reserved; -} __packed; - -struct mkhi_fwcaps { - struct mkhi_rule_id id; - u8 len; - u8 data[]; -} __packed; - struct mkhi_fw_ver_block { u16 minor; u8 major; @@ -115,22 +102,6 @@ struct mkhi_fw_ver { struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; } __packed; -#define MKHI_FWCAPS_GROUP_ID 0x3 -#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 -#define MKHI_GEN_GROUP_ID 0xFF -#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 -struct mkhi_msg_hdr { - u8 group_id; - u8 command; - u8 reserved; - u8 result; -} __packed; - -struct mkhi_msg { - struct mkhi_msg_hdr hdr; - u8 data[]; -} __packed; - #define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ sizeof(struct mkhi_fwcaps) + \ sizeof(struct mei_os_ver)) @@ -164,7 +135,6 @@ static int mei_osver(struct mei_cl_device *cldev) sizeof(struct mkhi_fw_ver)) #define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ sizeof(struct mkhi_fw_ver_block) * (__num)) -#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ static int mei_fwver(struct mei_cl_device *cldev) { char buf[MKHI_FWVER_BUF_LEN]; @@ -187,7 +157,7 @@ static int mei_fwver(struct mei_cl_device *cldev) ret = 0; bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0, - MKHI_RCV_TIMEOUT); + cldev->bus->timeouts.mkhi_recv); if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { /* * Should be at least one version block, @@ -218,6 +188,19 @@ static int mei_fwver(struct mei_cl_device *cldev) return ret; } +static int mei_gfx_memory_ready(struct mei_cl_device *cldev) +{ + struct mkhi_gfx_mem_ready req = {0}; + unsigned int mode = MEI_CL_IO_TX_INTERNAL; + + req.hdr.group_id = MKHI_GROUP_ID_GFX; + req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ; + req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED; + + dev_dbg(&cldev->dev, "Sending memory ready command\n"); + return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, 
mode); +} + static void mei_mkhi_fix(struct mei_cl_device *cldev) { int ret; @@ -264,6 +247,39 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev) dev_err(&cldev->dev, "FW version command failed %d\n", ret); mei_cldev_disable(cldev); } + +static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev) +{ + int ret; + + /* No need to enable the client if nothing is needed from it */ + if (!cldev->bus->fw_f_fw_ver_supported && + cldev->bus->pxp_mode != MEI_DEV_PXP_INIT) + return; + + ret = mei_cldev_enable(cldev); + if (ret) + return; + + if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) { + ret = mei_gfx_memory_ready(cldev); + if (ret < 0) + dev_err(&cldev->dev, "memory ready command failed %d\n", ret); + else + dev_dbg(&cldev->dev, "memory ready command sent\n"); + /* we go to reset after that */ + cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP; + goto out; + } + + ret = mei_fwver(cldev); + if (ret < 0) + dev_err(&cldev->dev, "FW version command failed %d\n", + ret); +out: + mei_cldev_disable(cldev); +} + /** * mei_wd - wd client on the bus, change protocol version * as the API has changed. @@ -503,6 +519,26 @@ static void vt_support(struct mei_cl_device *cldev) cldev->do_match = 1; } +/** + * pxp_is_ready - enable bus client if pxp is ready + * + * @cldev: me clients device + */ +static void pxp_is_ready(struct mei_cl_device *cldev) +{ + struct mei_device *bus = cldev->bus; + + switch (bus->pxp_mode) { + case MEI_DEV_PXP_READY: + case MEI_DEV_PXP_DEFAULT: + cldev->do_match = 1; + break; + default: + cldev->do_match = 0; + break; + } +} + #define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } static struct mei_fixup { @@ -516,10 +552,10 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_WD, mei_wd), MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver), - MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver), + MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver), MEI_FIXUP(MEI_UUID_HDCP, whitelist), MEI_FIXUP(MEI_UUID_ANY, vt_support), - MEI_FIXUP(MEI_UUID_PAVP, whitelist), + MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready), }; /** diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 31264ab2eb13..0b2fbe1335a7 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
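The pxp_is_ready() fixup above gates whether the PAVP client is exposed on the MEI bus, based on the device's current PXP mode. A minimal standalone model of that decision follows; the enum and function names are illustrative, not the kernel symbols:

#include <stdio.h>

/* values mirror enum mei_dev_pxp_mode from this series */
enum pxp_mode { PXP_DEFAULT, PXP_INIT, PXP_SETUP, PXP_READY };

/* mirrors the switch in pxp_is_ready(): the client is only matched
 * once the firmware either needs no PXP setup or has finished it */
static int pavp_do_match(enum pxp_mode mode)
{
	switch (mode) {
	case PXP_READY:
	case PXP_DEFAULT:
		return 1;	/* usable: expose the bus client */
	default:
		return 0;	/* still initializing: hide it for now */
	}
}

int main(void)
{
	printf("setup: %d\n", pavp_do_match(PXP_SETUP));	/* 0 */
	printf("ready: %d\n", pavp_do_match(PXP_READY));	/* 1 */
	return 0;
}

The effect is that during PXP bring-up the device simply never matches, instead of matching and then failing at runtime.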
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -870,7 +870,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) } list_move_tail(&cb->list, &dev->ctrl_rd_list); - cl->timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = dev->timeouts.connect; mei_schedule_stall_timer(dev); return 0; @@ -945,7 +945,7 @@ static int __mei_cl_disconnect(struct mei_cl *cl) wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY || cl->state == MEI_FILE_DISCONNECTED, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); rets = cl->status; @@ -1065,7 +1065,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) } list_move_tail(&cb->list, &dev->ctrl_rd_list); - cl->timer_count = MEI_CONNECT_TIMEOUT; + cl->timer_count = dev->timeouts.connect; mei_schedule_stall_timer(dev); return 0; } @@ -1164,7 +1164,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, cl->state == MEI_FILE_DISCONNECTED || cl->state == MEI_FILE_DISCONNECT_REQUIRED || cl->state == MEI_FILE_DISCONNECT_REPLY), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (!mei_cl_is_connected(cl)) { @@ -1562,7 +1562,7 @@ int mei_cl_notify_request(struct mei_cl *cl, cl->notify_en == request || cl->status || !mei_cl_is_connected(cl), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (cl->notify_en != request && !cl->status) @@ -2336,7 +2336,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, cl->dma_mapped || cl->status, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (!cl->dma_mapped && !cl->status) @@ -2415,7 +2415,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) mutex_unlock(&dev->device_lock); wait_event_timeout(cl->wait, !cl->dma_mapped || cl->status, - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); if (cl->dma_mapped && !cl->status) diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c index 1ce61e9e24fc..3b098d4c8e3d 100644 --- a/drivers/misc/mei/debugfs.c +++ b/drivers/misc/mei/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2012-2016, Intel Corporation. All rights reserved + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -86,6 +86,20 @@ out: } DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active); +static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state) +{ +#define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state + switch (state) { + MEI_PXP_MODE(DEFAULT); + MEI_PXP_MODE(INIT); + MEI_PXP_MODE(SETUP); + MEI_PXP_MODE(READY); + default: + return "unknown"; + } +#undef MEI_PXP_MODE +} + static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) { struct mei_device *dev = m->private; @@ -112,6 +126,9 @@ static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) seq_printf(m, "pg: %s, %s\n", mei_pg_is_enabled(dev) ? 
"ENABLED" : "DISABLED", mei_pg_state_str(mei_pg_state(dev))); + + seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode)); + return 0; } DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate); diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c index c8145e9b62b6..75765e4df4ed 100644 --- a/drivers/misc/mei/gsc-me.c +++ b/drivers/misc/mei/gsc-me.c @@ -13,6 +13,7 @@ #include <linux/ktime.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/kthread.h> #include "mei_dev.h" #include "hw-me.h" @@ -31,6 +32,17 @@ static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val) return 0; } +static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem) +{ + u32 low = lower_32_bits(mem->start); + u32 hi = upper_32_bits(mem->start); + u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID; + + iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG); + iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG); + iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG); +} + static int mei_gsc_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *aux_dev_id) { @@ -47,7 +59,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, device = &aux_dev->dev; - dev = mei_me_dev_init(device, cfg); + dev = mei_me_dev_init(device, cfg, adev->slow_firmware); if (!dev) { ret = -ENOMEM; goto err; @@ -66,13 +78,33 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, dev_set_drvdata(device, dev); - ret = devm_request_threaded_irq(device, hw->irq, - mei_me_irq_quick_handler, - mei_me_irq_thread_handler, - IRQF_ONESHOT, KBUILD_MODNAME, dev); - if (ret) { - dev_err(device, "irq register failed %d\n", ret); - goto err; + if (adev->ext_op_mem.start) { + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); + dev->pxp_mode = MEI_DEV_PXP_INIT; + } + + /* use polling */ + if (mei_me_hw_use_polling(hw)) { + mei_disable_interrupts(dev); + mei_clear_interrupts(dev); + init_waitqueue_head(&hw->wait_active); + hw->is_active = true; /* start in active mode for initialization */ + hw->polling_thread = kthread_run(mei_me_polling_thread, dev, + "kmegscirqd/%s", dev_name(device)); + if (IS_ERR(hw->polling_thread)) { + ret = PTR_ERR(hw->polling_thread); + dev_err(device, "unable to create kernel thread: %d\n", ret); + goto err; + } + } else { + ret = devm_request_threaded_irq(device, hw->irq, + mei_me_irq_quick_handler, + mei_me_irq_thread_handler, + IRQF_ONESHOT, KBUILD_MODNAME, dev); + if (ret) { + dev_err(device, "irq register failed %d\n", ret); + goto err; + } } pm_runtime_get_noresume(device); @@ -98,7 +130,8 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev, register_err: mei_stop(dev); - devm_free_irq(device, hw->irq, dev); + if (!mei_me_hw_use_polling(hw)) + devm_free_irq(device, hw->irq, dev); err: dev_err(device, "probe failed: %d\n", ret); @@ -119,12 +152,17 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev) mei_stop(dev); + hw = to_me_hw(dev); + if (mei_me_hw_use_polling(hw)) + kthread_stop(hw->polling_thread); + mei_deregister(dev); pm_runtime_disable(&aux_dev->dev); mei_disable_interrupts(dev); - devm_free_irq(&aux_dev->dev, hw->irq, dev); + if (!mei_me_hw_use_polling(hw)) + devm_free_irq(&aux_dev->dev, hw->irq, dev); } static int __maybe_unused mei_gsc_pm_suspend(struct device *device) @@ -144,11 +182,22 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device) static int __maybe_unused mei_gsc_pm_resume(struct device *device) { struct mei_device *dev = 
dev_get_drvdata(device); + struct auxiliary_device *aux_dev; + struct mei_aux_device *adev; int err; + struct mei_me_hw *hw; if (!dev) return -ENODEV; + hw = to_me_hw(dev); + aux_dev = to_auxiliary_dev(device); + adev = auxiliary_dev_to_mei_aux_dev(aux_dev); + if (adev->ext_op_mem.start) { + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); + dev->pxp_mode = MEI_DEV_PXP_INIT; + } + err = mei_restart(dev); if (err) return err; @@ -185,6 +234,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device) if (mei_write_is_idle(dev)) { hw = to_me_hw(dev); hw->pg_state = MEI_PG_ON; + + if (mei_me_hw_use_polling(hw)) + hw->is_active = false; ret = 0; } else { ret = -EAGAIN; @@ -209,6 +261,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device) hw = to_me_hw(dev); hw->pg_state = MEI_PG_OFF; + if (mei_me_hw_use_polling(hw)) { + hw->is_active = true; + wake_up(&hw->wait_active); + } + mutex_unlock(&dev->device_lock); irq_ret = mei_me_irq_thread_handler(1, dev); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index cf2b8261da14..de712cbf5d07 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ #include <linux/export.h> @@ -232,7 +232,7 @@ int mei_hbm_start_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); ret = wait_event_timeout(dev->wait_hbm_start, dev->hbm_state != MEI_HBM_STARTING, - mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); + dev->timeouts.hbm); mutex_lock(&dev->device_lock); if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { @@ -275,7 +275,7 @@ int mei_hbm_start_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_STARTING; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -316,7 +316,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_DR_SETUP; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -351,7 +351,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev) } dev->hbm_state = MEI_HBM_CAP_SETUP; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -385,7 +385,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev) return ret; } dev->hbm_state = MEI_HBM_ENUM_CLIENTS; - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; } @@ -751,7 +751,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx) return ret; } - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; + dev->init_clients_timer = dev->timeouts.client_init; mei_schedule_stall_timer(dev); return 0; diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 15e8e2b322b1..99966cd3e7d8 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
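The runtime-PM hooks above park and wake the GSC polling thread through hw->is_active and hw->wait_active. A rough userspace model of that handshake, assuming pthreads in place of the kernel waitqueue (the kernel poller additionally wakes on a timeout via wait_event_timeout()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_active = PTHREAD_COND_INITIALIZER;
static int is_active;

/* runtime suspend: the poller should stop burning cycles */
static void pm_runtime_suspend_model(void)
{
	pthread_mutex_lock(&lock);
	is_active = 0;
	pthread_mutex_unlock(&lock);
}

/* runtime resume: flip the flag and kick the poller awake */
static void pm_runtime_resume_model(void)
{
	pthread_mutex_lock(&lock);
	is_active = 1;
	pthread_cond_broadcast(&wait_active);
	pthread_mutex_unlock(&lock);
}

/* poller side: block while inactive, the analogue of waiting on
 * hw->wait_active at the top of the polling loop */
static void poller_wait_until_active(void)
{
	pthread_mutex_lock(&lock);
	while (!is_active)
		pthread_cond_wait(&wait_active, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pm_runtime_suspend_model();
	pm_runtime_resume_model();
	poller_wait_until_active();
	puts("poller may touch the hardware again");
	return 0;
}

Build with cc -pthread; the point is only the flag-plus-wakeup shape, not the exact primitives.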
* Intel Management Engine Interface (Intel MEI) Linux driver */ #ifndef _MEI_HW_MEI_REGS_H_ @@ -127,6 +127,8 @@ # define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 #define PCI_CFG_HFS_4 0x64 #define PCI_CFG_HFS_5 0x68 +# define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003 +# define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3 #define PCI_CFG_HFS_6 0x6C /* MEI registers */ @@ -143,6 +145,11 @@ /* H_D0I3C - D0I3 Control */ #define H_D0I3C 0x800 +#define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100 +#define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104 +#define H_GSC_EXT_OP_MEM_LIMIT_REG 0x108 +#define GSC_EXT_OP_MEM_VALID BIT(31) + /* register bits of H_CSR (Host Control Status register) */ /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */ #define H_CBD 0xFF000000 diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 3a95fe7d4e33..9e2f781c6ed5 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/sizes.h> +#include <linux/delay.h> #include "mei_dev.h" #include "hbm.h" @@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev) */ static void mei_me_intr_enable(struct mei_device *dev) { - u32 hcsr = mei_hcsr_read(dev); + u32 hcsr; + + if (mei_me_hw_use_polling(to_me_hw(dev))) + return; - hcsr |= H_CSR_IE_MASK; + hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK; mei_hcsr_set(dev, hcsr); } @@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); + if (mei_me_hw_use_polling(hw)) + return; + synchronize_irq(hw->irq); } @@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev) { u32 hcsr = mei_hcsr_read(dev); - hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; + if (!mei_me_hw_use_polling(to_me_hw(dev))) + hcsr |= H_CSR_IE_MASK; + + hcsr |= H_IG | H_RDY; mei_hcsr_set(dev, hcsr); } @@ -424,6 +434,29 @@ static bool mei_me_hw_is_resetting(struct mei_device *dev) } /** + * mei_gsc_pxp_check - check for gsc firmware entering pxp mode + * + * @dev: the device structure + */ +static void mei_gsc_pxp_check(struct mei_device *dev) +{ + struct mei_me_hw *hw = to_me_hw(dev); + u32 fwsts5 = 0; + + if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) + return; + + hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); + trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { + dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); + dev->pxp_mode = MEI_DEV_PXP_READY; + } else { + dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); + } +} + +/** * mei_me_hw_ready_wait - wait until the me(hw) has turned ready * or timeout is reached * @@ -435,13 +468,15 @@ static int mei_me_hw_ready_wait(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_hw_ready, dev->recvd_hw_ready, - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); + dev->timeouts.hw_ready); mutex_lock(&dev->device_lock); if (!dev->recvd_hw_ready) { dev_err(dev->dev, "wait hw ready failed\n"); return -ETIME; } + mei_gsc_pxp_check(dev); + mei_me_hw_reset_release(dev); dev->recvd_hw_ready = false; return 0; @@ -697,7 +732,6 @@ static void mei_me_pg_unset(struct mei_device *dev) static int 
mei_me_pg_legacy_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; dev->pg_event = MEI_PG_EVENT_WAIT; @@ -708,7 +742,8 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { @@ -734,7 +769,6 @@ static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; if (dev->pg_event == MEI_PG_EVENT_RECEIVED) @@ -746,7 +780,8 @@ static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); reply: @@ -762,7 +797,8 @@ reply: mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) @@ -877,8 +913,6 @@ static u32 mei_me_d0i3_unset(struct mei_device *dev) static int mei_me_d0i3_enter_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); - unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); int ret; u32 reg; @@ -900,7 +934,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); + dev->pg_event == MEI_PG_EVENT_RECEIVED, + dev->timeouts.pgi); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { @@ -920,7 +955,8 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -980,7 +1016,6 @@ on: static int mei_me_d0i3_exit_sync(struct mei_device *dev) { struct mei_me_hw *hw = to_me_hw(dev); - unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); int ret; u32 reg; @@ -1003,7 +1038,8 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev) mutex_unlock(&dev->device_lock); wait_event_timeout(dev->wait_pg, - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, + dev->timeouts.d0i3); mutex_lock(&dev->device_lock); if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { @@ -1176,7 +1212,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) hcsr |= H_RST | H_IG | H_CSR_IS_MASK; - if (!intr_enable) + if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev))) hcsr &= ~H_CSR_IE_MASK; dev->recvd_hw_ready = false; @@ -1259,7 +1295,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) /* check if ME wants a reset */ if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { - dev_warn(dev->dev, "FW not ready: resetting.\n"); + dev_warn(dev->dev, "FW 
not ready: resetting: dev_state = %d pxp = %d\n", + dev->dev_state, dev->pxp_mode); if (dev->dev_state == MEI_DEV_POWERING_DOWN || dev->dev_state == MEI_DEV_POWER_DOWN) mei_cl_all_disconnect(dev); @@ -1331,6 +1368,66 @@ end: } EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); +#define MEI_POLLING_TIMEOUT_ACTIVE 100 +#define MEI_POLLING_TIMEOUT_IDLE 500 + +/** + * mei_me_polling_thread - interrupt register polling thread + * + * The thread monitors the interrupt source register and calls + * mei_me_irq_thread_handler() to handle the firmware + * input. + * + * The function polls with a MEI_POLLING_TIMEOUT_ACTIVE timeout + * as long as events keep arriving; when the device is idle, the + * polling timeout grows by MEI_POLLING_TIMEOUT_ACTIVE on each + * pass, up to MEI_POLLING_TIMEOUT_IDLE. + * + * @_dev: mei device + * + * Return: always 0 + */ +int mei_me_polling_thread(void *_dev) +{ + struct mei_device *dev = _dev; + irqreturn_t irq_ret; + long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + + dev_dbg(dev->dev, "kernel thread is running\n"); + while (!kthread_should_stop()) { + struct mei_me_hw *hw = to_me_hw(dev); + u32 hcsr; + + wait_event_timeout(hw->wait_active, + hw->is_active || kthread_should_stop(), + msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE)); + + if (kthread_should_stop()) + break; + + hcsr = mei_hcsr_read(dev); + if (me_intr_src(hcsr)) { + polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; + irq_ret = mei_me_irq_thread_handler(1, dev); + if (irq_ret != IRQ_HANDLED) + dev_err(dev->dev, "irq_ret %d\n", irq_ret); + } else { + /* + * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE + * up to MEI_POLLING_TIMEOUT_IDLE + */ + polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_ACTIVE, + MEI_POLLING_TIMEOUT_IDLE); + } + + schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mei_me_polling_thread); + static const struct mei_hw_ops mei_me_hw_ops = { .trc_status = mei_me_trc_status, @@ -1636,11 +1733,12 @@ EXPORT_SYMBOL_GPL(mei_me_get_cfg); * * @parent: device associated with physical device (pci/platform) * @cfg: per device generation config + * @slow_fw: configure longer timeouts as FW is slow * * Return: The mei_device pointer on success, NULL on failure. */ struct mei_device *mei_me_dev_init(struct device *parent, - const struct mei_cfg *cfg) + const struct mei_cfg *cfg, bool slow_fw) { struct mei_device *dev; struct mei_me_hw *hw; @@ -1655,7 +1753,7 @@ struct mei_device *mei_me_dev_init(struct device *parent, for (i = 0; i < DMA_DSCR_NUM; i++) dev->dr_dscr[i].size = cfg->dma_size[i]; - mei_device_init(dev, parent, &mei_me_hw_ops); + mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops); hw->cfg = cfg; dev->fw_f_fw_ver_supported = cfg->fw_ver_supported; diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h index a071c645e905..95cf830b7c7b 100644 --- a/drivers/misc/mei/hw-me.h +++ b/drivers/misc/mei/hw-me.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved.
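mei_me_polling_thread() above backs its polling interval off while the interrupt source register stays quiet. The delay schedule it produces can be modelled in isolation; this is a sketch using the constants defined in the patch, with a local clamp helper standing in for the kernel's clamp_val():

#include <stdio.h>

#define MEI_POLLING_TIMEOUT_ACTIVE 100	/* ms */
#define MEI_POLLING_TIMEOUT_IDLE   500	/* ms */

static long clamp_ms(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* one scheduling step: snap back to the fast rate on an event,
 * otherwise grow the delay in ACTIVE-sized steps up to IDLE */
static long next_delay(long cur, int had_event)
{
	if (had_event)
		return MEI_POLLING_TIMEOUT_ACTIVE;
	return clamp_ms(cur + MEI_POLLING_TIMEOUT_ACTIVE,
			MEI_POLLING_TIMEOUT_ACTIVE,
			MEI_POLLING_TIMEOUT_IDLE);
}

int main(void)
{
	long d = MEI_POLLING_TIMEOUT_ACTIVE;

	for (int i = 0; i < 6; i++) {
		printf("%ld ms\n", d);	/* 100 200 300 400 500 500 */
		d = next_delay(d, 0);	/* idle stream */
	}
	return 0;
}

One interrupt-like event resets the sequence to 100 ms, so the thread stays responsive under load and cheap when idle.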
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -51,6 +51,9 @@ struct mei_cfg { * @d0i3_supported: di03 support * @hbuf_depth: depth of hardware host/write buffer in slots * @read_fws: read FW status register handler + * @polling_thread: interrupt polling thread + * @wait_active: the polling thread activity wait queue + * @is_active: the device is active */ struct mei_me_hw { const struct mei_cfg *cfg; @@ -60,10 +63,19 @@ struct mei_me_hw { bool d0i3_supported; u8 hbuf_depth; int (*read_fws)(const struct mei_device *dev, int where, u32 *val); + /* polling */ + struct task_struct *polling_thread; + wait_queue_head_t wait_active; + bool is_active; }; #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) +static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw) +{ + return hw->irq < 0; +} + /** * enum mei_cfg_idx - indices to platform specific configurations. * @@ -120,12 +132,13 @@ enum mei_cfg_idx { const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx); struct mei_device *mei_me_dev_init(struct device *parent, - const struct mei_cfg *cfg); + const struct mei_cfg *cfg, bool slow_fw); int mei_me_pg_enter_sync(struct mei_device *dev); int mei_me_pg_exit_sync(struct mei_device *dev); irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); +int mei_me_polling_thread(void *_dev); #endif /* _MEI_INTERFACE_H_ */ diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c index 00652c137cc7..9862c6cd3e32 100644 --- a/drivers/misc/mei/hw-txe.c +++ b/drivers/misc/mei/hw-txe.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -1201,7 +1201,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev) if (!dev) return NULL; - mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops); + mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops); hw = to_txe_hw(dev); diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index b46077b17114..e7e020dba6b1 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -16,11 +16,16 @@ #define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ +#define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */ #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ #define MEI_HBM_TIMEOUT 1 /* 1 second */ +#define MEI_HBM_TIMEOUT_SLOW 5 /* 5 second, slow FW */ + +#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ +#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */ /* * FW page size for DMA allocations diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index eb052005ca86..bac8852aad51 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2012-2022, Intel Corporation. 
All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -218,16 +218,6 @@ int mei_start(struct mei_device *dev) goto err; } - if (!mei_host_is_ready(dev)) { - dev_err(dev->dev, "host is not ready.\n"); - goto err; - } - - if (!mei_hw_is_ready(dev)) { - dev_err(dev->dev, "ME is not ready.\n"); - goto err; - } - if (!mei_hbm_version_is_supported(dev)) { dev_dbg(dev->dev, "MEI start failed.\n"); goto err; @@ -320,6 +310,8 @@ void mei_stop(struct mei_device *dev) mei_clear_interrupts(dev); mei_synchronize_irq(dev); + /* to catch HW-initiated reset */ + mei_cancel_work(dev); mutex_lock(&dev->device_lock); @@ -357,14 +349,16 @@ bool mei_write_is_idle(struct mei_device *dev) EXPORT_SYMBOL_GPL(mei_write_is_idle); /** - * mei_device_init -- initialize mei_device structure + * mei_device_init - initialize mei_device structure * * @dev: the mei device * @device: the device structure + * @slow_fw: configure longer timeouts as FW is slow * @hw_ops: hw operations */ void mei_device_init(struct mei_device *dev, struct device *device, + bool slow_fw, const struct mei_hw_ops *hw_ops) { /* setup our list array */ @@ -393,6 +387,8 @@ void mei_device_init(struct mei_device *dev, bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); dev->open_handle_count = 0; + dev->pxp_mode = MEI_DEV_PXP_DEFAULT; + /* * Reserving the first client ID * 0: Reserved for MEI Bus Message communications @@ -402,6 +398,21 @@ void mei_device_init(struct mei_device *dev, dev->pg_event = MEI_PG_EVENT_IDLE; dev->ops = hw_ops; dev->dev = device; + + dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT); + dev->timeouts.connect = MEI_CONNECT_TIMEOUT; + dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT; + dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); + dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); + if (slow_fw) { + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW); + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW); + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW); + } else { + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT); + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT); + } } EXPORT_SYMBOL_GPL(mei_device_init); diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 786f7c8f7f61..930887e7e38d 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -571,7 +571,7 @@ static int mei_ioctl_connect_vtag(struct file *file, cl->state == MEI_FILE_DISCONNECTED || cl->state == MEI_FILE_DISCONNECT_REQUIRED || cl->state == MEI_FILE_DISCONNECT_REPLY), - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); + dev->timeouts.cl_connect); mutex_lock(&dev->device_lock); } diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 694f866f87ef..6bb3e1ba9ded 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 
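mei_device_init() above stops hard-coding MEI_CL_CONNECT_TIMEOUT and friends and instead fills a per-device timeout table, lengthening a few entries when the firmware is flagged slow. A compact standalone version of that selection, using the values from hw.h in this patch but with shortened names:

#include <stdio.h>

#define CL_CONNECT_S		15
#define CL_CONNECT_SLOW_S	30
#define HBM_S			1
#define HBM_SLOW_S		5
#define MKHI_RCV_MS		500
#define MKHI_RCV_SLOW_MS	10000

struct dev_timeouts {
	unsigned long cl_connect_s;	/* client connect, seconds */
	unsigned long hbm_s;		/* HBM handshake, seconds */
	unsigned long mkhi_recv_ms;	/* MKHI receive, milliseconds */
};

/* the slow_fw branch of mei_device_init(), minus the jiffies math */
static void timeouts_init(struct dev_timeouts *t, int slow_fw)
{
	t->cl_connect_s = slow_fw ? CL_CONNECT_SLOW_S : CL_CONNECT_S;
	t->hbm_s        = slow_fw ? HBM_SLOW_S : HBM_S;
	t->mkhi_recv_ms = slow_fw ? MKHI_RCV_SLOW_MS : MKHI_RCV_MS;
}

int main(void)
{
	struct dev_timeouts t;

	timeouts_init(&t, 1);	/* GSC-style slow firmware */
	printf("connect %lus, hbm %lus, mkhi %lums\n",
	       t.cl_connect_s, t.hbm_s, t.mkhi_recv_ms);
	return 0;
}

Every waiter then reads dev->timeouts.* instead of a compile-time constant, which is what the client.c and hbm.c hunks above switch over.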
* Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -62,6 +62,21 @@ enum mei_dev_state { MEI_DEV_POWER_UP }; +/** + * enum mei_dev_pxp_mode - MEI PXP mode state + * + * @MEI_DEV_PXP_DEFAULT: PCH based device, no initialization required + * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware + * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware response + * @MEI_DEV_PXP_READY: device initialized + */ +enum mei_dev_pxp_mode { + MEI_DEV_PXP_DEFAULT = 0, + MEI_DEV_PXP_INIT = 1, + MEI_DEV_PXP_SETUP = 2, + MEI_DEV_PXP_READY = 3, +}; + const char *mei_dev_state_str(int state); enum mei_file_transaction_states { @@ -415,6 +430,17 @@ struct mei_fw_version { #define MEI_MAX_FW_VER_BLOCKS 3 +struct mei_dev_timeouts { + unsigned long hw_ready; /* Timeout on ready message, in jiffies */ + int connect; /* HPS: at least 2 seconds, in seconds */ + unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */ + int client_init; /* HPS: Clients Enumeration Timeout, in seconds */ + unsigned long pgi; /* PG Isolation time response, in jiffies */ + unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */ + unsigned long hbm; /* HBM operation timeout, in jiffies */ + unsigned long mkhi_recv; /* receive timeout, in jiffies */ +}; + /** * struct mei_device - MEI private device struct * @@ -443,6 +469,7 @@ struct mei_fw_version { * @reset_count : number of consecutive resets * @dev_state : device state * @hbm_state : state of host bus message protocol + * @pxp_mode : PXP device mode * @init_clients_timer : HBM init handshake timeout * * @pg_event : power gating event @@ -480,6 +507,8 @@ struct mei_fw_version { * @allow_fixed_address: allow user space to connect a fixed client * @override_fixed_address: force allow fixed address behavior * + * @timeouts: actual timeout values + * * @reset_work : work item for the device reset * @bus_rescan_work : work item for the bus rescan * @@ -524,6 +553,7 @@ struct mei_device { unsigned long reset_count; enum mei_dev_state dev_state; enum mei_hbm_state hbm_state; + enum mei_dev_pxp_mode pxp_mode; u16 init_clients_timer; /* @@ -568,6 +598,8 @@ struct mei_device { bool allow_fixed_address; bool override_fixed_address; + struct mei_dev_timeouts timeouts; + struct work_struct reset_work; struct work_struct bus_rescan_work; @@ -632,6 +664,7 @@ static inline u32 mei_slots2data(int slots) */ void mei_device_init(struct mei_device *dev, struct device *device, + bool slow_fw, const struct mei_hw_ops *hw_ops); int mei_reset(struct mei_device *dev); int mei_start(struct mei_device *dev); diff --git a/drivers/misc/mei/mkhi.h b/drivers/misc/mei/mkhi.h new file mode 100644 index 000000000000..1473ea489666 --- /dev/null +++ b/drivers/misc/mei/mkhi.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
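Pieced together from mei_gsc_mkhi_fix_ver(), mei_gsc_pxp_check() and the enum above, the PXP bring-up walks INIT -> SETUP -> READY (or stays in DEFAULT on PCH parts). A small state-step model of that lifecycle; the function names are illustrative and the transitions are inferred from the hunks in this series:

#include <stdio.h>

enum pxp_mode { PXP_DEFAULT, PXP_INIT, PXP_SETUP, PXP_READY };

/* driver side: on INIT, send the memory-ready message and expect a
 * firmware reset while it switches over (INIT -> SETUP) */
static enum pxp_mode pxp_send_mem_ready(enum pxp_mode m)
{
	return m == PXP_INIT ? PXP_SETUP : m;
}

/* reset side: after HW-ready, the boot-type field of PCI_CFG_HFS_5
 * says whether the firmware came up in PXP mode (SETUP -> READY) */
static enum pxp_mode pxp_check_boot_type(enum pxp_mode m, int fw_in_pxp)
{
	return (m == PXP_SETUP && fw_in_pxp) ? PXP_READY : m;
}

int main(void)
{
	enum pxp_mode m = PXP_INIT;

	m = pxp_send_mem_ready(m);	/* SETUP */
	m = pxp_check_boot_type(m, 1);	/* READY */
	printf("mode: %d\n", m);	/* 3, i.e. READY */
	return 0;
}

Only once READY (or DEFAULT) is reached does the pxp_is_ready() fixup shown earlier let the PAVP client match.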
+ * Intel Management Engine Interface (Intel MEI) Linux driver + */ + +#ifndef _MEI_MKHI_H_ +#define _MEI_MKHI_H_ + +#include <linux/types.h> + +#define MKHI_FEATURE_PTT 0x10 + +#define MKHI_FWCAPS_GROUP_ID 0x3 +#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 +#define MKHI_GEN_GROUP_ID 0xFF +#define MKHI_GEN_GET_FW_VERSION_CMD 0x2 + +#define MKHI_GROUP_ID_GFX 0x30 +#define MKHI_GFX_RESET_WARN_CMD_REQ 0x0 +#define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1 + +/* Allow transition to PXP mode without approval */ +#define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1 + +struct mkhi_rule_id { + __le16 rule_type; + u8 feature_id; + u8 reserved; +} __packed; + +struct mkhi_fwcaps { + struct mkhi_rule_id id; + u8 len; + u8 data[]; +} __packed; + +struct mkhi_msg_hdr { + u8 group_id; + u8 command; + u8 reserved; + u8 result; +} __packed; + +struct mkhi_msg { + struct mkhi_msg_hdr hdr; + u8 data[]; +} __packed; + +struct mkhi_gfx_mem_ready { + struct mkhi_msg_hdr hdr; + u32 flags; +} __packed; + +#endif /* _MEI_MKHI_H_ */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5435604327a7..704cd0caa172 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. * Intel Management Engine Interface (Intel MEI) Linux driver */ @@ -203,7 +203,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* allocates and initializes the mei dev structure */ - dev = mei_me_dev_init(&pdev->dev, cfg); + dev = mei_me_dev_init(&pdev->dev, cfg, false); if (!dev) { err = -ENOMEM; goto end; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f2f98e942cf2..0cc5ac35fc57 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -93,6 +93,7 @@ config PEAQ_WMI config NVIDIA_WMI_EC_BACKLIGHT tristate "EC Backlight Driver for Hybrid Graphics Notebook Systems" + depends on ACPI_VIDEO depends on ACPI_WMI depends on BACKLIGHT_CLASS_DEVICE help diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index e0230ea0cb7e..b933a5165edb 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -643,69 +643,6 @@ static const struct dmi_system_id non_acer_quirks[] __initconst = { {} }; -static int __init -video_set_backlight_video_vendor(const struct dmi_system_id *d) -{ - interface->capability &= ~ACER_CAP_BRIGHTNESS; - pr_info("Brightness must be controlled by generic video driver\n"); - return 0; -} - -static const struct dmi_system_id video_vendor_dmi_table[] __initconst = { - { - .callback = video_set_backlight_video_vendor, - .ident = "Acer TravelMate 4750", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), - }, - }, - { - .callback = video_set_backlight_video_vendor, - .ident = "Acer Extensa 5235", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"), - }, - }, - { - .callback = video_set_backlight_video_vendor, - .ident = "Acer TravelMate 5760", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"), - }, - }, - { - .callback = video_set_backlight_video_vendor, - .ident = "Acer Aspire 5750", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), - }, - }, - { - .callback = video_set_backlight_video_vendor, - 
.ident = "Acer Aspire 5741", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), - }, - }, - { - /* - * Note no video_set_backlight_video_vendor, we must use the - * acer interface, as there is no native backlight interface. - */ - .ident = "Acer KAV80", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), - }, - }, - {} -}; - /* Find which quirks are needed for a particular vendor/ model pair */ static void __init find_quirks(void) { @@ -2477,9 +2414,6 @@ static int __init acer_wmi_init(void) set_quirks(); - if (dmi_check_system(video_vendor_dmi_table)) - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - if (acpi_video_get_backlight_type() != acpi_backlight_vendor) interface->capability &= ~ACER_CAP_BRIGHTNESS; diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index ffe98a18440b..ca33df7ea550 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -21,7 +21,6 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/vga_switcheroo.h> -#include <acpi/video.h> #include <asm/io.h> /** @@ -694,7 +693,6 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) * backlight control and supports more levels than other options. * Disable the other backlight choices. */ - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); apple_bl_unregister(); gmux_data->power_state = VGA_SWITCHEROO_ON; @@ -804,7 +802,6 @@ static void gmux_remove(struct pnp_dev *pnp) apple_gmux_data = NULL; kfree(gmux_data); - acpi_video_register(); apple_bl_register(); } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 478dd300b9c9..bbfed85051ee 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -79,12 +79,10 @@ static struct quirk_entry quirk_asus_q500a = { /* * For those machines that need software to control bt/wifi status - * and can't adjust brightness through ACPI interface * and have duplicate events(ACPI and WMI) for display toggle */ static struct quirk_entry quirk_asus_x55u = { .wapf = 4, - .wmi_backlight_power = true, .wmi_backlight_set_devstate = true, .no_display_toggle = true, }; @@ -99,11 +97,6 @@ static struct quirk_entry quirk_asus_x200ca = { .wmi_backlight_set_devstate = true, }; -static struct quirk_entry quirk_asus_ux303ub = { - .wmi_backlight_native = true, - .wmi_backlight_set_devstate = true, -}; - static struct quirk_entry quirk_asus_x550lb = { .wmi_backlight_set_devstate = true, .xusb2pr = 0x01D9, @@ -147,11 +140,6 @@ static const struct dmi_system_id asus_quirks[] = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "U32U"), }, - /* - * Note this machine has a Brazos APU, and most Brazos Asus - * machines need quirk_asus_x55u / wmi_backlight_power but - * here acpi-video seems to work fine for backlight control. - */ .driver_data = &quirk_asus_wapf4, }, { @@ -381,15 +369,6 @@ static const struct dmi_system_id asus_quirks[] = { }, { .callback = dmi_matched, - .ident = "ASUSTeK COMPUTER INC. UX303UB", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "UX303UB"), - }, - .driver_data = &quirk_asus_ux303ub, - }, - { - .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. 
UX330UAK", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 89b604e04d7f..434249ac47a5 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -3553,7 +3553,6 @@ static int asus_wmi_add(struct platform_device *pdev) struct platform_driver *pdrv = to_platform_driver(pdev->dev.driver); struct asus_wmi_driver *wdrv = to_asus_wmi_driver(pdrv); struct asus_wmi *asus; - const char *chassis_type; acpi_status status; int err; u32 result; @@ -3635,18 +3634,6 @@ static int asus_wmi_add(struct platform_device *pdev) if (asus->driver->quirks->wmi_force_als_set) asus_wmi_set_als(); - /* Some Asus desktop boards export an acpi-video backlight interface, - stop this from showing up */ - chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); - if (chassis_type && !strcmp(chassis_type, "3")) - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - - if (asus->driver->quirks->wmi_backlight_power) - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - - if (asus->driver->quirks->wmi_backlight_native) - acpi_video_set_dmi_backlight_type(acpi_backlight_native); - if (asus->driver->quirks->xusb2pr) asus_wmi_set_xusb2pr(asus); diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index b302415bf1d9..f30252efe1db 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -29,8 +29,6 @@ struct quirk_entry { bool hotplug_wireless; bool scalar_panel_brightness; bool store_backlight_power; - bool wmi_backlight_power; - bool wmi_backlight_native; bool wmi_backlight_set_devstate; bool wmi_force_als_set; bool use_kbd_dock_devid; diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index ce86d84ee796..32d9f0ba6be3 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -96,11 +96,6 @@ static struct quirk_entry quirk_asus_et2012_type3 = { .store_backlight_power = true, }; -static struct quirk_entry quirk_asus_x101ch = { - /* We need this when ACPI function doesn't do this well */ - .wmi_backlight_power = true, -}; - static struct quirk_entry *quirks; static void et2012_quirks(void) @@ -151,25 +146,7 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_unknown, }, - { - .callback = dmi_matched, - .ident = "ASUSTeK Computer INC. X101CH", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"), - }, - .driver_data = &quirk_asus_x101ch, - }, - { - .callback = dmi_matched, - .ident = "ASUSTeK Computer INC. 
1015CX", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"), - }, - .driver_data = &quirk_asus_x101ch, - }, - {}, + {} }; static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code, diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c index 61e37194df70..baccdf658538 100644 --- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c +++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c @@ -7,73 +7,10 @@ #include <linux/backlight.h> #include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h> #include <linux/types.h> #include <linux/wmi.h> - -/** - * enum wmi_brightness_method - WMI method IDs - * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status - * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source - */ -enum wmi_brightness_method { - WMI_BRIGHTNESS_METHOD_LEVEL = 1, - WMI_BRIGHTNESS_METHOD_SOURCE = 2, - WMI_BRIGHTNESS_METHOD_MAX -}; - -/** - * enum wmi_brightness_mode - Operation mode for WMI-wrapped method - * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source. - * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level. - * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This - * is only valid when the WMI method is - * %WMI_BRIGHTNESS_METHOD_LEVEL. - */ -enum wmi_brightness_mode { - WMI_BRIGHTNESS_MODE_GET = 0, - WMI_BRIGHTNESS_MODE_SET = 1, - WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2, - WMI_BRIGHTNESS_MODE_MAX -}; - -/** - * enum wmi_brightness_source - Backlight brightness control source selection - * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU. - * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the - * system's Embedded Controller (EC). - * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the - * DisplayPort AUX channel. - */ -enum wmi_brightness_source { - WMI_BRIGHTNESS_SOURCE_GPU = 1, - WMI_BRIGHTNESS_SOURCE_EC = 2, - WMI_BRIGHTNESS_SOURCE_AUX = 3, - WMI_BRIGHTNESS_SOURCE_MAX -}; - -/** - * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method - * @mode: Pass in an &enum wmi_brightness_mode value to select between - * getting or setting a value. - * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET - * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or - * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode. - * @ret: Out parameter returning retrieved value when operating in - * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL - * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode. - * @ignored: Padding; not used. The ACPI method expects a 24 byte params struct. - * - * This is the parameters structure for the WmiBrightnessNotify ACPI method as - * wrapped by WMI. The value passed in to @val or returned by @ret will be a - * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or - * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE. 
- */ -struct wmi_brightness_args { - u32 mode; - u32 val; - u32 ret; - u32 ignored[3]; -}; +#include <acpi/video.h> /** * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method @@ -151,19 +88,10 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct { struct backlight_properties props = {}; struct backlight_device *bdev; - u32 source; int ret; - ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_SOURCE, - WMI_BRIGHTNESS_MODE_GET, &source); - if (ret) - return ret; - - /* - * This driver is only to be used when brightness control is handled - * by the EC; otherwise, the GPU driver(s) should control brightness. - */ - if (source != WMI_BRIGHTNESS_SOURCE_EC) + /* drivers/acpi/video_detect.c also checks that SOURCE == EC */ + if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec) return -ENODEV; /* @@ -191,8 +119,6 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct return PTR_ERR_OR_ZERO(bdev); } -#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7" - static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = { { .guid_string = WMI_BRIGHTNESS_GUID }, { } diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index c187dcdf82f0..cc30cf08f32d 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -356,23 +356,13 @@ struct samsung_laptop { }; struct samsung_quirks { - bool broken_acpi_video; bool four_kbd_backlight_levels; bool enable_kbd_backlight; - bool use_native_backlight; bool lid_handling; }; static struct samsung_quirks samsung_unknown = {}; -static struct samsung_quirks samsung_broken_acpi_video = { - .broken_acpi_video = true, -}; - -static struct samsung_quirks samsung_use_native_backlight = { - .use_native_backlight = true, -}; - static struct samsung_quirks samsung_np740u3e = { .four_kbd_backlight_levels = true, .enable_kbd_backlight = true, @@ -1542,76 +1532,6 @@ static const struct dmi_system_id samsung_dmi_table[] __initconst = { /* Specific DMI ids for laptop with quirks */ { .callback = samsung_dmi_matched, - .ident = "N150P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), - DMI_MATCH(DMI_BOARD_NAME, "N150P"), - }, - .driver_data = &samsung_use_native_backlight, - }, - { - .callback = samsung_dmi_matched, - .ident = "N145P/N250P/N260P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), - DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), - }, - .driver_data = &samsung_use_native_backlight, - }, - { - .callback = samsung_dmi_matched, - .ident = "N150/N210/N220", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), - DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), - }, - .driver_data = &samsung_broken_acpi_video, - }, - { - .callback = samsung_dmi_matched, - .ident = "NF110/NF210/NF310", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), - DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), - }, - .driver_data = &samsung_broken_acpi_video, - }, - { - .callback = samsung_dmi_matched, - .ident = "X360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "X360"), - DMI_MATCH(DMI_BOARD_NAME, "X360"), - }, - .driver_data = 
&samsung_broken_acpi_video, - }, - { - .callback = samsung_dmi_matched, - .ident = "N250P", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), - DMI_MATCH(DMI_BOARD_NAME, "N250P"), - }, - .driver_data = &samsung_use_native_backlight, - }, - { - .callback = samsung_dmi_matched, - .ident = "NC210", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), - DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"), - DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"), - }, - .driver_data = &samsung_broken_acpi_video, - }, - { - .callback = samsung_dmi_matched, .ident = "730U3E/740U3E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), @@ -1654,15 +1574,8 @@ static int __init samsung_init(void) samsung->handle_backlight = true; samsung->quirks = quirks; -#ifdef CONFIG_ACPI - if (samsung->quirks->broken_acpi_video) - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - if (samsung->quirks->use_native_backlight) - acpi_video_set_dmi_backlight_type(acpi_backlight_native); - if (acpi_video_get_backlight_type() != acpi_backlight_vendor) samsung->handle_backlight = false; -#endif ret = samsung_platform_init(samsung); if (ret) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 0fc9e8b8827b..030dc37d50b8 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -272,14 +272,6 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = { }; /* - * List of models which have a broken acpi-video backlight interface and thus - * need to use the toshiba (vendor) interface instead. - */ -static const struct dmi_system_id toshiba_vendor_backlight_dmi[] = { - {} -}; - -/* * Utility */ @@ -2881,14 +2873,6 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev) return 0; } - /* - * Tell acpi-video-detect code to prefer vendor backlight on all - * systems with transflective backlight and on dmi matched systems. - */ - if (dev->tr_backlight_supported || - dmi_check_system(toshiba_vendor_backlight_dmi)) - acpi_video_set_dmi_backlight_type(acpi_backlight_vendor); - if (acpi_video_get_backlight_type() != acpi_backlight_vendor) return 0;
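The platform/x86 hunks above all delete per-driver acpi_video_set_dmi_backlight_type() calls and DMI override tables; the decision now lives centrally in drivers/acpi/video_detect.c and each vendor driver just asks for the verdict. A toy consumer-side version of the pattern they are left with; the enum and getter are stand-ins for the real acpi_video API, not its definitions:

#include <errno.h>
#include <stdio.h>

enum backlight_type { BL_VENDOR, BL_VIDEO, BL_NATIVE, BL_NVIDIA_WMI_EC };

/* stand-in for acpi_video_get_backlight_type(); the real one folds
 * DMI quirks, EC/WMI capabilities and native-GPU presence together */
static enum backlight_type get_backlight_type(void)
{
	return BL_NATIVE;
}

/* the gate each vendor driver keeps after this series */
static int vendor_backlight_probe(void)
{
	if (get_backlight_type() != BL_VENDOR)
		return -ENODEV;	/* another interface owns brightness */
	/* ... register the vendor backlight device ... */
	return 0;
}

int main(void)
{
	printf("probe: %d\n", vendor_backlight_probe());
	return 0;
}

Centralizing the choice is what lets this series drop the scattered DMI tables without changing which interface each machine ends up with.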