Diffstat (limited to 'drivers/gpu/drm')
354 files changed, 31612 insertions, 16074 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index fc357319de35..483059a22b1b 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -108,33 +108,13 @@ config DRM_KMS_CMA_HELPER source "drivers/gpu/drm/i2c/Kconfig" -config DRM_TDFX - tristate "3dfx Banshee/Voodoo3+" - depends on DRM && PCI - help - Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), - graphics card. If M is selected, the module will be called tdfx. - source "drivers/gpu/drm/arm/Kconfig" -config DRM_R128 - tristate "ATI Rage 128" - depends on DRM && PCI - select FW_LOADER - help - Choose this option if you have an ATI Rage 128 graphics card. If M - is selected, the module will be called r128. AGP support for - this card is strongly suggested (unless you have a PCI version). - config DRM_RADEON tristate "ATI Radeon" depends on DRM && PCI - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select FW_LOADER select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM select POWER_SUPPLY select HWMON @@ -153,12 +133,8 @@ source "drivers/gpu/drm/radeon/Kconfig" config DRM_AMDGPU tristate "AMD GPU" depends on DRM && PCI - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select FW_LOADER select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER select DRM_TTM select POWER_SUPPLY select HWMON @@ -171,55 +147,11 @@ config DRM_AMDGPU If M is selected, the module will be called amdgpu. source "drivers/gpu/drm/amd/amdgpu/Kconfig" -source "drivers/gpu/drm/amd/powerplay/Kconfig" - -source "drivers/gpu/drm/amd/acp/Kconfig" source "drivers/gpu/drm/nouveau/Kconfig" -config DRM_I810 - tristate "Intel I810" - # !PREEMPT because of missing ioctl locking - depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) - help - Choose this option if you have an Intel I810 graphics card. If M is - selected, the module will be called i810. AGP support is required - for this driver to work. - source "drivers/gpu/drm/i915/Kconfig" -config DRM_MGA - tristate "Matrox g200/g400" - depends on DRM && PCI - select FW_LOADER - help - Choose this option if you have a Matrox G200, G400 or G450 graphics - card. If M is selected, the module will be called mga. AGP - support is required for this driver to work. - -config DRM_SIS - tristate "SiS video cards" - depends on DRM && AGP - depends on FB_SIS || FB_SIS=n - help - Choose this option if you have a SiS 630 or compatible video - chipset. If M is selected the module will be called sis. AGP - support is required for this driver to work. - -config DRM_VIA - tristate "Via unichrome video cards" - depends on DRM && PCI - help - Choose this option if you have a Via unichrome or compatible video - chipset. If M is selected the module will be called via. - -config DRM_SAVAGE - tristate "Savage video cards" - depends on DRM && PCI - help - Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister - chipset. If M is selected the module will be called savage. - config DRM_VGEM tristate "Virtual GEM provider" depends on DRM @@ -290,3 +222,81 @@ source "drivers/gpu/drm/arc/Kconfig" source "drivers/gpu/drm/hisilicon/Kconfig" source "drivers/gpu/drm/mediatek/Kconfig" + +# Keep legacy drivers last + +menuconfig DRM_LEGACY + bool "Enable legacy drivers (DANGEROUS)" + depends on DRM + help + Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous + APIs to user-space, which can be used to circumvent access + restrictions and other security measures. 
For backwards compatibility + those drivers are still available, but their use is highly + inadvisable and might harm your system. + + You are recommended to use the safe modeset-only drivers instead, and + perform 3D emulation in user-space. + + Unless you have strong reasons to go rogue, say "N". + +if DRM_LEGACY + +config DRM_TDFX + tristate "3dfx Banshee/Voodoo3+" + depends on DRM && PCI + help + Choose this option if you have a 3dfx Banshee or Voodoo3 (or later), + graphics card. If M is selected, the module will be called tdfx. + +config DRM_R128 + tristate "ATI Rage 128" + depends on DRM && PCI + select FW_LOADER + help + Choose this option if you have an ATI Rage 128 graphics card. If M + is selected, the module will be called r128. AGP support for + this card is strongly suggested (unless you have a PCI version). + +config DRM_I810 + tristate "Intel I810" + # !PREEMPT because of missing ioctl locking + depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) + help + Choose this option if you have an Intel I810 graphics card. If M is + selected, the module will be called i810. AGP support is required + for this driver to work. + +config DRM_MGA + tristate "Matrox g200/g400" + depends on DRM && PCI + select FW_LOADER + help + Choose this option if you have a Matrox G200, G400 or G450 graphics + card. If M is selected, the module will be called mga. AGP + support is required for this driver to work. + +config DRM_SIS + tristate "SiS video cards" + depends on DRM && AGP + depends on FB_SIS || FB_SIS=n + help + Choose this option if you have a SiS 630 or compatible video + chipset. If M is selected the module will be called sis. AGP + support is required for this driver to work. + +config DRM_VIA + tristate "Via unichrome video cards" + depends on DRM && PCI + help + Choose this option if you have a Via unichrome or compatible video + chipset. If M is selected the module will be called via. + +config DRM_SAVAGE + tristate "Savage video cards" + depends on DRM && PCI + help + Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister + chipset. If M is selected the module will be called savage. 
+ +endif # DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 0238bf8bc8c3..439d89b25ae0 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -12,7 +12,9 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ - drm_modeset_lock.o drm_atomic.o drm_bridge.o + drm_modeset_lock.o drm_atomic.o drm_bridge.o \ + drm_framebuffer.o drm_connector.o drm_blend.o \ + drm_encoder.o drm_mode_object.o drm_property.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o @@ -24,7 +26,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ - drm_simple_kms_helper.o drm_blend.o + drm_simple_kms_helper.o drm_modeset_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o @@ -46,7 +48,7 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ -obj-$(CONFIG_DRM_I915) += i915/ +obj-$(CONFIG_DRM_I915) += i915/ obj-$(CONFIG_DRM_MGAG200) += mgag200/ obj-$(CONFIG_DRM_VC4) += vc4/ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 7335c0420c70..f3cb69de0c44 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -25,3 +25,5 @@ config DRM_AMDGPU_GART_DEBUGFS Selecting this option creates a debugfs file to inspect the mapped pages. Uses more memory for housekeeping, enable only for debugging. 
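The DRM_LEGACY symbol introduced above gates all DRI1 driver entries behind a single menuconfig. The same symbol can also be used to compile legacy-only paths out of shared code; a minimal sketch, assuming hypothetical drm_example_* helpers that are not part of this patch:

#include <linux/kconfig.h>
#include <linux/printk.h>

/* Hypothetical legacy-only helper, for illustration only. */
static void drm_example_legacy_path(void)
{
	pr_info("DRI1 legacy path\n");
}

static void drm_example_dispatch(void)
{
	/* IS_ENABLED() evaluates to 0 when DRM_LEGACY=n, so the legacy
	 * branch and its static callee are dropped at compile time. */
	if (IS_ENABLED(CONFIG_DRM_LEGACY))
		drm_example_legacy_path();
}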
+source "drivers/gpu/drm/amd/powerplay/Kconfig" +source "drivers/gpu/drm/amd/acp/Kconfig" diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index c7fcdcedaadb..21dd7c00da15 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -58,7 +58,8 @@ amdgpu-y += \ # add DCE block amdgpu-y += \ dce_v10_0.o \ - dce_v11_0.o + dce_v11_0.o \ + dce_virtual.o # add GFX block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 06192698bd96..b8d66670bb17 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -90,6 +90,7 @@ #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27 +#define ENCODER_OBJECT_ID_VIRTUAL 0x28 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF @@ -119,6 +120,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17 /* deleted */ @@ -147,6 +149,7 @@ #define GRAPH_OBJECT_ENUM_ID5 0x05 #define GRAPH_OBJECT_ENUM_ID6 0x06 #define GRAPH_OBJECT_ENUM_ID7 0x07 +#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08 /****************************************************/ /* Graphics Object ID Bit definition */ @@ -408,6 +411,10 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT) +#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT) + /****************************************************/ /* Connector Object ID definition - Shared with BIOS */ /****************************************************/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8ebc5f1eb4c0..3cc2629eb158 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -51,6 +51,7 @@ #include "amdgpu_ih.h" #include "amdgpu_irq.h" #include "amdgpu_ucode.h" +#include "amdgpu_ttm.h" #include "amdgpu_gds.h" #include "amd_powerplay.h" #include "amdgpu_acp.h" @@ -91,6 +92,8 @@ extern unsigned amdgpu_pcie_lane_cap; extern unsigned amdgpu_cg_mask; extern unsigned amdgpu_pg_mask; extern char *amdgpu_disable_cu; +extern int amdgpu_sclk_deep_sleep_en; +extern char *amdgpu_virtual_display; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -248,10 +251,9 @@ struct amdgpu_vm_pte_funcs { uint64_t pe, uint64_t src, unsigned count); /* write pte one entry at a time with addr mapping */ - void (*write_pte)(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags); + void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr); /* for linear pte/pde updates without addr mapping */ void (*set_pte_pde)(struct amdgpu_ib *ib, uint64_t pe, @@ -396,46 +398,9 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); /* - * TTM. + * BO. 
*/ -#define AMDGPU_TTM_LRU_SIZE 20 - -struct amdgpu_mman_lru { - struct list_head *lru[TTM_NUM_MEM_TYPES]; - struct list_head *swap_lru; -}; - -struct amdgpu_mman { - struct ttm_bo_global_ref bo_global_ref; - struct drm_global_reference mem_global_ref; - struct ttm_bo_device bdev; - bool mem_global_referenced; - bool initialized; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *vram; - struct dentry *gtt; -#endif - - /* buffer handling */ - const struct amdgpu_buffer_funcs *buffer_funcs; - struct amdgpu_ring *buffer_funcs_ring; - /* Scheduler entity for buffer moves */ - struct amd_sched_entity entity; - - /* custom LRU management */ - struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; -}; - -int amdgpu_copy_buffer(struct amdgpu_ring *ring, - uint64_t src_offset, - uint64_t dst_offset, - uint32_t byte_count, - struct reservation_object *resv, - struct fence **fence); -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); - struct amdgpu_bo_list_entry { struct amdgpu_bo *robj; struct ttm_validate_buffer tv; @@ -498,10 +463,12 @@ struct amdgpu_bo { struct amdgpu_device *adev; struct drm_gem_object gem_base; struct amdgpu_bo *parent; + struct amdgpu_bo *shadow; struct ttm_bo_kmap_obj dma_buf_vmap; struct amdgpu_mn *mn; struct list_head mn_list; + struct list_head shadow_list; }; #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) @@ -677,6 +644,8 @@ struct amdgpu_mc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint32_t srbm_soft_reset; + struct amdgpu_mode_mc_save save; }; /* @@ -721,10 +690,11 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, */ struct amdgpu_flip_work { - struct work_struct flip_work; + struct delayed_work flip_work; struct work_struct unpin_work; struct amdgpu_device *adev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_rbo; @@ -815,13 +785,17 @@ struct amdgpu_ring { /* maximum number of VMIDs */ #define AMDGPU_NUM_VM 16 +/* Maximum number of PTEs the hardware can write with one command */ +#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF + /* number of entries in page table */ #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 -#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1) -#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK) + +/* LOG2 number of continuous pages for the fragment field */ +#define AMDGPU_LOG2_PAGES_PER_FRAG 4 #define AMDGPU_PTE_VALID (1 << 0) #define AMDGPU_PTE_SYSTEM (1 << 1) @@ -833,10 +807,7 @@ struct amdgpu_ring { #define AMDGPU_PTE_READABLE (1 << 5) #define AMDGPU_PTE_WRITEABLE (1 << 6) -/* PTE (Page Table Entry) fragment field for different page sizes */ -#define AMDGPU_PTE_FRAG_4KB (0 << 7) -#define AMDGPU_PTE_FRAG_64KB (4 << 7) -#define AMDGPU_LOG2_PAGES_PER_FRAG 4 +#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) /* How to programm VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 @@ -846,6 +817,7 @@ struct amdgpu_ring { struct amdgpu_vm_pt { struct amdgpu_bo_list_entry entry; uint64_t addr; + uint64_t shadow_addr; }; struct amdgpu_vm { @@ -948,7 +920,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); -uint64_t amdgpu_vm_map_gart(const dma_addr_t 
*pages_addr, uint64_t addr); int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, @@ -957,7 +928,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem); + bool clear); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo); struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, @@ -1195,6 +1166,10 @@ struct amdgpu_gfx { unsigned ce_ram_size; struct amdgpu_cu_info cu_info; const struct amdgpu_gfx_funcs *funcs; + + /* reset mask */ + uint32_t grbm_soft_reset; + uint32_t srbm_soft_reset; }; int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -1683,6 +1658,7 @@ struct amdgpu_uvd { bool address_64_bit; bool use_ctx_buf; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; }; /* @@ -1709,6 +1685,7 @@ struct amdgpu_vce { struct amdgpu_irq_src irq; unsigned harvest_config; struct amd_sched_entity entity; + uint32_t srbm_soft_reset; }; /* @@ -1729,6 +1706,7 @@ struct amdgpu_sdma { struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; int num_instances; + uint32_t srbm_soft_reset; }; /* @@ -1956,6 +1934,7 @@ struct amdgpu_ip_block_status { bool valid; bool sw; bool hw; + bool hang; }; struct amdgpu_device { @@ -2055,6 +2034,7 @@ struct amdgpu_device { atomic_t gpu_reset_counter; /* display */ + bool enable_virtual_display; struct amdgpu_mode_info mode_info; struct work_struct hotplug_work; struct amdgpu_irq_src crtc_irq; @@ -2117,6 +2097,10 @@ struct amdgpu_device { struct kfd_dev *kfd; struct amdgpu_virtualization virtualization; + + /* link all shadow bo */ + struct list_head shadow_list; + struct mutex shadow_list_lock; }; bool amdgpu_device_is_px(struct drm_device *dev); @@ -2192,6 +2176,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); #define REG_GET_FIELD(value, reg, field) \ (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) +#define WREG32_FIELD(reg, field, val) \ + WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + /* * BIOS helpers. 
*/ @@ -2242,7 +2229,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) -#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) +#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) @@ -2387,6 +2374,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) /* Common functions */ int amdgpu_gpu_reset(struct amdgpu_device *adev); +bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_card_posted(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); @@ -2412,6 +2400,8 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev); +int amdgpu_ttm_global_init(struct amdgpu_device *adev); void amdgpu_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 983175363b06..1b621160b52e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -259,6 +259,33 @@ static const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Unknown }; +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct atom_context *ctx = mode_info->atom_context; + int index = GetIndexIntoMasterTable(DATA, Object_Header); + u16 size, data_offset; + u8 frev, crev; + ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; + ATOM_OBJECT_HEADER *obj_header; + + if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) + return false; + + if (crev < 2) + return false; + + obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); + path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) + (ctx->bios + data_offset + + le16_to_cpu(obj_header->usDisplayPathTableOffset)); + + if (path_obj->ucNumOfDispPath) + return true; + else + return false; +} + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev) { struct amdgpu_mode_info *mode_info = &adev->mode_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8c2e69661799..15dd43ec38bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -140,6 +140,8 @@ struct 
amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device * uint8_t id); void amdgpu_atombios_i2c_init(struct amdgpu_device *adev); +bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev); + bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev); int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 33e47a43ae32..345305235349 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -39,7 +39,8 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, start_jiffies = jiffies; for (i = 0; i < n; i++) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence); + r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence, + false); if (r) goto exit_do_move; r = fence_wait(fence, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ff0b55a65ca3..319a5e1d9389 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -1504,6 +1504,86 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { .force = amdgpu_connector_dvi_force, }; +static struct drm_encoder * +amdgpu_connector_virtual_encoder(struct drm_connector *connector) +{ + int enc_id = connector->encoder_ids[0]; + struct drm_encoder *encoder; + int i; + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; + + encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); + if (!encoder) + continue; + + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + return encoder; + } + + /* pick the first one */ + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); + return NULL; +} + +static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) +{ + struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); + + if (encoder) { + amdgpu_connector_add_common_modes(encoder, connector); + } + + return 0; +} + +static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) +{ + return 0; +} + +static enum drm_connector_status + +amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) +{ + return connector_status_connected; +} + +int amdgpu_connector_virtual_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void amdgpu_connector_virtual_force(struct drm_connector *connector) +{ + return; +} + +static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = { + .get_modes = amdgpu_connector_virtual_get_modes, + .mode_valid = amdgpu_connector_virtual_mode_valid, + .best_encoder = amdgpu_connector_virtual_encoder, +}; + +static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = { + .dpms = amdgpu_connector_virtual_dpms, + .detect = amdgpu_connector_virtual_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = amdgpu_connector_virtual_set_property, + .destroy = amdgpu_connector_destroy, + .force = amdgpu_connector_virtual_force, +}; + void amdgpu_connector_add(struct amdgpu_device 
*adev, uint32_t connector_id, @@ -1888,6 +1968,17 @@ amdgpu_connector_add(struct amdgpu_device *adev, connector->interlace_allowed = false; connector->doublescan_allowed = false; break; + case DRM_MODE_CONNECTOR_VIRTUAL: + amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); + if (!amdgpu_dig_connector) + goto failed; + amdgpu_connector->con_priv = amdgpu_dig_connector; + drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type); + drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs); + subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0307ff5887c5..d80e5d3a4add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -287,18 +287,56 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) return max(bytes_moved_threshold, 1024*1024ull); } +static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, + struct amdgpu_bo *bo) +{ + u64 initial_bytes_moved; + uint32_t domain; + int r; + + if (bo->pin_count) + return 0; + + /* Avoid moving this one if we have moved too many buffers + * for this IB already. + * + * Note that this allows moving at least one buffer of + * any size, because it doesn't take the current "bo" + * into account. We don't want to disallow buffer moves + * completely. + */ + if (p->bytes_moved <= p->bytes_moved_threshold) + domain = bo->prefered_domains; + else + domain = bo->allowed_domains; + +retry: + amdgpu_ttm_placement_from_domain(bo, domain); + initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - + initial_bytes_moved; + + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain != bo->allowed_domains) { + domain = bo->allowed_domains; + goto retry; + } + } + + return r; +} + int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { struct amdgpu_bo_list_entry *lobj; - u64 initial_bytes_moved; int r; list_for_each_entry(lobj, validated, tv.head) { struct amdgpu_bo *bo = lobj->robj; bool binding_userptr = false; struct mm_struct *usermm; - uint32_t domain; usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); if (usermm && usermm != current->mm) @@ -313,35 +351,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, binding_userptr = true; } - if (bo->pin_count) - continue; - - /* Avoid moving this one if we have moved too many buffers - * for this IB already. - * - * Note that this allows moving at least one buffer of - * any size, because it doesn't take the current "bo" - * into account. We don't want to disallow buffer moves - * completely. 
- */ - if (p->bytes_moved <= p->bytes_moved_threshold) - domain = bo->prefered_domains; - else - domain = bo->allowed_domains; - - retry: - amdgpu_ttm_placement_from_domain(bo, domain); - initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); - r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); - p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - - initial_bytes_moved; - - if (unlikely(r)) { - if (r != -ERESTARTSYS && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } + r = amdgpu_cs_bo_validate(p, bo); + if (r) return r; + if (bo->shadow) { + r = amdgpu_cs_bo_validate(p, bo); + if (r) + return r; } if (binding_userptr) { @@ -386,8 +402,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); - if (unlikely(r != 0)) + if (unlikely(r != 0)) { + DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); goto error_free_pages; + } /* Without a BO list we don't have userptr BOs */ if (!p->bo_list) @@ -427,9 +445,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, /* Unreserve everything again. */ ttm_eu_backoff_reservation(&p->ticket, &p->validated); - /* We tried to often, just abort */ + /* We tried too many times, just abort */ if (!--tries) { r = -EDEADLK; + DRM_ERROR("deadlock in %s\n", __func__); goto error_free_pages; } @@ -441,11 +460,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, sizeof(struct page*)); if (!e->user_pages) { r = -ENOMEM; + DRM_ERROR("calloc failure in %s\n", __func__); goto error_free_pages; } r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages); if (r) { + DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n"); drm_free_large(e->user_pages); e->user_pages = NULL; goto error_free_pages; @@ -462,12 +483,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bytes_moved = 0; r = amdgpu_cs_list_validate(p, &duplicates); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); goto error_validate; + } r = amdgpu_cs_list_validate(p, &p->validated); - if (r) + if (r) { + DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n"); goto error_validate; + } fpriv->vm.last_eviction_counter = atomic64_read(&p->adev->num_evictions); @@ -617,7 +642,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index df7ab2458e50..c38dc47cd767 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -46,6 +46,7 @@ #endif #include "vi.h" #include "bif/bif_4_1_d.h" +#include <linux/pci.h> static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); @@ -1181,10 +1182,38 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, return 1; } +static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev) +{ + adev->enable_virtual_display = false; + + if (amdgpu_virtual_display) { + struct drm_device *ddev = adev->ddev; + const char *pci_address_name = pci_name(ddev->pdev); + char *pciaddstr, *pciaddstr_tmp, *pciaddname; + + pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); + pciaddstr_tmp = pciaddstr; + while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) { + if (!strcmp(pci_address_name, 
pciaddname)) { + adev->enable_virtual_display = true; + break; + } + } + + DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n", + amdgpu_virtual_display, pci_address_name, + adev->enable_virtual_display); + + kfree(pciaddstr); + } +} + static int amdgpu_early_init(struct amdgpu_device *adev) { int i, r; + amdgpu_whether_enable_virtual_display(adev); + switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: @@ -1521,6 +1550,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); + INIT_LIST_HEAD(&adev->shadow_list); + mutex_init(&adev->shadow_list_lock); + adev->rmmio_base = pci_resource_start(adev->pdev, 5); adev->rmmio_size = pci_resource_len(adev->pdev, 5); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); @@ -1937,6 +1969,126 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) return 0; } +static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) +{ + int i; + bool asic_hang = false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_blocks[i].funcs->check_soft_reset) + adev->ip_blocks[i].funcs->check_soft_reset(adev); + if (adev->ip_block_status[i].hang) { + DRM_INFO("IP block:%d is hang!\n", i); + asic_hang = true; + } + } + return asic_hang; +} + +int amdgpu_pre_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->pre_soft_reset) { + r = adev->ip_blocks[i].funcs->pre_soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static bool amdgpu_need_full_reset(struct amdgpu_device *adev) +{ + if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang || + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + return false; +} + +static int amdgpu_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->soft_reset) { + r = adev->ip_blocks[i].funcs->soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static int amdgpu_post_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_block_status[i].valid) + continue; + if (adev->ip_block_status[i].hang && + adev->ip_blocks[i].funcs->post_soft_reset) + r = adev->ip_blocks[i].funcs->post_soft_reset(adev); + if (r) + return r; + } + + return 0; +} + +bool amdgpu_need_backup(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return false; + + return amdgpu_lockup_timeout > 0 ? 
true : false; +} + +static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct fence **fence) +{ + uint32_t domain; + int r; + + if (!bo->shadow) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (r) + return r; + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + /* if bo has been evicted, then no need to recover */ + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + r = amdgpu_bo_restore_from_shadow(adev, ring, bo, + NULL, fence, true); + if (r) { + DRM_ERROR("recover page table failed!\n"); + goto err; + } + } +err: + amdgpu_bo_unreserve(bo); + return r; +} + /** * amdgpu_gpu_reset - reset the asic * @@ -1949,6 +2101,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) { int i, r; int resched; + bool need_full_reset; + + if (!amdgpu_check_soft_reset(adev)) { + DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); + return 0; + } atomic_inc(&adev->gpu_reset_counter); @@ -1967,40 +2125,88 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(adev); - /* save scratch */ - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_suspend(adev); + need_full_reset = amdgpu_need_full_reset(adev); -retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + if (!need_full_reset) { + amdgpu_pre_soft_reset(adev); + r = amdgpu_soft_reset(adev); + amdgpu_post_soft_reset(adev); + if (r || amdgpu_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } } - r = amdgpu_asic_reset(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (need_full_reset) { + /* save scratch */ + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_suspend(adev); - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume(adev); +retry: + /* Disable fb access */ + if (adev->mode_info.num_crtc) { + struct amdgpu_mode_mc_save save; + amdgpu_display_stop_mc_access(adev, &save); + amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); + } + + r = amdgpu_asic_reset(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); + + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume(adev); + } + /* restore scratch */ + amdgpu_atombios_scratch_regs_restore(adev); } - /* restore scratch */ - amdgpu_atombios_scratch_regs_restore(adev); if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); if (r) { dev_err(adev->dev, "ib ring test failed (%d).\n", r); r = amdgpu_suspend(adev); + need_full_reset = true; goto retry; } + /** + * recovery vm page tables, since we cannot depend on VRAM is + * consistent after gpu full reset. 
+ */ + if (need_full_reset && amdgpu_need_backup(adev)) { + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct amdgpu_bo *bo, *tmp; + struct fence *fence = NULL, *next = NULL; + + DRM_INFO("recover vram bo from shadow\n"); + mutex_lock(&adev->shadow_list_lock); + list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { + amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); + if (fence) { + r = fence_wait(fence, false); + if (r) { + WARN(r, "recovery from shadow isn't comleted\n"); + break; + } + } + fence_put(fence); + fence = next; + } + mutex_unlock(&adev->shadow_list_lock); + if (fence) { + r = fence_wait(fence, false); + if (r) + WARN(r, "recovery from shadow isn't comleted\n"); + } + fence_put(fence); + } for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring) continue; + amd_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } @@ -2020,7 +2226,6 @@ retry: /* bad news, how to tell it to userspace ? */ dev_info(adev->dev, "GPU reset failed\n"); } - amdgpu_irq_gpu_reset_resume_helper(adev); return r; } @@ -2178,22 +2383,26 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, struct amdgpu_device *adev = f->f_inode->i_private; ssize_t result = 0; int r; - bool use_bank; + bool pm_pg_lock, use_bank; unsigned instance_bank, sh_bank, se_bank; if (size & 0x3 || *pos & 0x3) return -EINVAL; + /* are we reading registers for which a PG lock is necessary? */ + pm_pg_lock = (*pos >> 23) & 1; + if (*pos & (1ULL << 62)) { se_bank = (*pos >> 24) & 0x3FF; sh_bank = (*pos >> 34) & 0x3FF; instance_bank = (*pos >> 44) & 0x3FF; use_bank = 1; - *pos &= 0xFFFFFF; } else { use_bank = 0; } + *pos &= 0x3FFFF; + if (use_bank) { if (sh_bank >= adev->gfx.config.max_sh_per_se || se_bank >= adev->gfx.config.max_shader_engines) @@ -2203,6 +2412,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, sh_bank, instance_bank); } + if (pm_pg_lock) + mutex_lock(&adev->pm.mutex); + while (size) { uint32_t value; @@ -2228,6 +2440,9 @@ end: mutex_unlock(&adev->grbm_idx_mutex); } + if (pm_pg_lock) + mutex_unlock(&adev->pm.mutex); + return result; } @@ -2443,7 +2658,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return -ENOMEM; /* version, increment each time something is added */ - config[no_regs++] = 0; + config[no_regs++] = 2; config[no_regs++] = adev->gfx.config.max_shader_engines; config[no_regs++] = adev->gfx.config.max_tile_pipes; config[no_regs++] = adev->gfx.config.max_cu_per_sh; @@ -2468,6 +2683,15 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, config[no_regs++] = adev->gfx.config.gb_addr_config; config[no_regs++] = adev->gfx.config.num_rbs; + /* rev==1 */ + config[no_regs++] = adev->rev_id; + config[no_regs++] = adev->pg_flags; + config[no_regs++] = adev->cg_flags; + + /* rev==2 */ + config[no_regs++] = adev->family; + config[no_regs++] = adev->external_rev_id; + while (size && (*pos < no_regs * 4)) { uint32_t value; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 76f96028313d..9af8d3c7ae8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb) container_of(cb, struct amdgpu_flip_work, cb); fence_put(f); - schedule_work(&work->flip_work); + schedule_work(&work->flip_work.work); } static bool 
amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, @@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, static void amdgpu_flip_work_func(struct work_struct *__work) { + struct delayed_work *delayed_work = + container_of(__work, struct delayed_work, work); struct amdgpu_flip_work *work = - container_of(__work, struct amdgpu_flip_work, flip_work); + container_of(delayed_work, struct amdgpu_flip_work, flip_work); struct amdgpu_device *adev = work->adev; struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &amdgpuCrtc->base; unsigned long flags; - unsigned i, repcnt = 4; - int vpos, hpos, stat, min_udelay = 0; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + unsigned i; + int vpos, hpos; if (amdgpu_flip_handle_fence(work, &work->excl)) return; @@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work) if (amdgpu_flip_handle_fence(work, &work->shared[i])) return; - /* We borrow the event spin lock for protecting flip_status */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval then it needs - * to delay programming the mmio flip until the real vblank is entered. - * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for - * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip */ - while (amdgpuCrtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. 
- */ - stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (amdgpuCrtc->enabled && + (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) { + schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); + return; } - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); + /* We borrow the event spin lock for protecting flip_status */ + spin_lock_irqsave(&crtc->dev->event_lock, flags); /* Do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); @@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) kfree(work); } -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target) { struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; @@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, if (work == NULL) return -ENOMEM; - INIT_WORK(&work->flip_work, amdgpu_flip_work_func); + INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func); INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func); work->event = event; @@ -237,12 +206,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, amdgpu_bo_unreserve(new_rbo); work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - drm_crtc_vblank_count(crtc) + + amdgpu_get_vblank_counter_kms(dev, work->crtc_id); /* we borrow the event spin lock for protecting flip_wrok */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -250,7 +215,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING; @@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, /* update crtc fb */ crtc->primary->fb = fb; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - amdgpu_flip_work_func(&work->flip_work); + amdgpu_flip_work_func(&work->flip_work.work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, 
false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); @@ -335,7 +297,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set) return ret; } -static const char *encoder_names[38] = { +static const char *encoder_names[41] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", @@ -374,6 +336,9 @@ static const char *encoder_names[38] = { "TRAVIS", "INTERNAL_VCE", "INTERNAL_UNIPHY3", + "HDMI_ANX9805", + "INTERNAL_AMCLK", + "VIRTUAL", }; static const char *hpd_names[6] = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9aa533cf4ad1..7c911d0be2b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -53,9 +53,11 @@ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. * - 3.3.0 - Add VM support for UVD on supported hardware. + * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS. + * - 3.5.0 - Add support for new UVD_NO_OP register. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 3 +#define KMS_DRIVER_MINOR 5 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -84,11 +86,13 @@ int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_powerplay = -1; int amdgpu_powercontainment = 1; +int amdgpu_sclk_deep_sleep_en = 1; unsigned amdgpu_pcie_gen_cap = 0; unsigned amdgpu_pcie_lane_cap = 0; unsigned amdgpu_cg_mask = 0xffffffff; unsigned amdgpu_pg_mask = 0xffffffff; char *amdgpu_disable_cu = NULL; +char *amdgpu_virtual_display = NULL; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -170,6 +174,9 @@ MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = module_param_named(powercontainment, amdgpu_powercontainment, int, 0444); #endif +MODULE_PARM_DESC(sclkdeepsleep, "SCLK Deep Sleep (1 = enable (default), 0 = disable)"); +module_param_named(sclkdeepsleep, amdgpu_sclk_deep_sleep_en, int, 0444); + MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); @@ -185,6 +192,9 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); +MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)"); +module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -341,7 +351,7 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev) #ifdef CONFIG_X86 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary); kfree(ap); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 919146780a15..bf033b58056c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -25,7 +25,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index 503d54098128..e73728d90388 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -31,14 +31,6 @@ #define AMDGPU_GWS_SHIFT PAGE_SHIFT #define AMDGPU_OA_SHIFT PAGE_SHIFT -#define AMDGPU_PL_GDS TTM_PL_PRIV0 -#define AMDGPU_PL_GWS TTM_PL_PRIV1 -#define AMDGPU_PL_OA TTM_PL_PRIV2 - -#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 -#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 -#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 - struct amdgpu_ring; struct amdgpu_bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 31a676376d73..c93a92a840ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -186,10 +186,8 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, "AMDGPU i2c hw bus %s", name); i2c->adapter.algo = &amdgpu_atombios_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the amdgpu bit adapter */ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index a31d7ef3032c..f5810f700668 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -142,7 +142,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } if (!ring->ready) { - dev_err(adev->dev, "couldn't schedule ib\n"); + dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 534fc04e80fd..5ebb3f43feb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -40,32 +40,15 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev) /* Allocate ring buffer */ if (adev->irq.ih.ring_obj == NULL) { - r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &adev->irq.ih.ring_obj); + r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->irq.ih.ring_obj, + &adev->irq.ih.gpu_addr, + (void **)&adev->irq.ih.ring); if (r) { DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); return r; } - r = amdgpu_bo_reserve(adev->irq.ih.ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(adev->irq.ih.ring_obj, - AMDGPU_GEM_DOMAIN_GTT, - &adev->irq.ih.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - DRM_ERROR("amdgpu: failed to pin ih ring buffer (%d).\n", r); - return r; - } - r = amdgpu_bo_kmap(adev->irq.ih.ring_obj, - (void **)&adev->irq.ih.ring); - amdgpu_bo_unreserve(adev->irq.ih.ring_obj); - if (r) { - DRM_ERROR("amdgpu: failed to map ih ring buffer (%d).\n", r); - return r; - } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 7ef09352e534..f016464035b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -70,6 +70,7 @@ struct amdgpu_irq { /* gen irq stuff */ struct irq_domain *domain; /* GPU irq controller domain */ unsigned virq[AMDGPU_MAX_IRQ_SRC_ID]; + uint32_t srbm_soft_reset; }; void amdgpu_irq_preinstall(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d942654a1de0..b78e74048f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -292,14 +292,14 @@ static 
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file type = AMD_IP_BLOCK_TYPE_UVD; ring_mask = adev->uvd.ring.ready ? 1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCE: type = AMD_IP_BLOCK_TYPE_VCE; for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++) ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; - ib_size_alignment = 8; + ib_size_alignment = 1; break; default: return -EINVAL; @@ -373,6 +373,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file case AMDGPU_INFO_NUM_BYTES_MOVED: ui64 = atomic64_read(&adev->num_bytes_moved); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; + case AMDGPU_INFO_NUM_EVICTIONS: + ui64 = atomic64_read(&adev->num_evictions); + return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: ui64 = atomic64_read(&adev->vram_usage); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 6b1d7d306564..7b0eff7d060b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -39,6 +39,8 @@ #include <drm/drm_plane_helper.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/hrtimer.h> +#include "amdgpu_irq.h" struct amdgpu_bo; struct amdgpu_device; @@ -339,6 +341,8 @@ struct amdgpu_mode_info { int num_dig; /* number of dig blocks */ int disp_priority; const struct amdgpu_display_funcs *funcs; + struct hrtimer vblank_timer; + enum amdgpu_interrupt_state vsync_timer_enabled; }; #define AMDGPU_MAX_BL_LEVEL 0xFF @@ -587,10 +591,10 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile void amdgpu_print_display_setup(struct drm_device *dev); int amdgpu_modeset_create_props(struct amdgpu_device *adev); int amdgpu_crtc_set_config(struct drm_mode_set *set); -int amdgpu_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags); +int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, uint32_t target); extern const struct drm_mode_config_funcs amdgpu_mode_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6f0873c75a25..b17734e0ecc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -44,14 +44,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev); static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev, struct ttm_mem_reg *mem) { - u64 ret = 0; - if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) { - ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) > - adev->mc.visible_vram_size ? - adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : - mem->size; - } - return ret; + if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size) + return 0; + + return ((mem->start << PAGE_SHIFT) + mem->size) > + adev->mc.visible_vram_size ? 
+ adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) : + mem->size; } static void amdgpu_update_memory_usage(struct amdgpu_device *adev, @@ -99,6 +98,11 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) drm_gem_object_release(&bo->gem_base); amdgpu_bo_unref(&bo->parent); + if (!list_empty(&bo->shadow_list)) { + mutex_lock(&bo->adev->shadow_list_lock); + list_del_init(&bo->shadow_list); + mutex_unlock(&bo->adev->shadow_list_lock); + } kfree(bo->metadata); kfree(bo); } @@ -112,84 +116,93 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, struct ttm_placement *placement, - struct ttm_place *placements, + struct ttm_place *places, u32 domain, u64 flags) { - u32 c = 0, i; - - placement->placement = placements; - placement->busy_placement = placements; + u32 c = 0; if (domain & AMDGPU_GEM_DOMAIN_VRAM) { + unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; + if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && - adev->mc.visible_vram_size < adev->mc.real_vram_size) { - placements[c].fpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN; + !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && + adev->mc.visible_vram_size < adev->mc.real_vram_size) { + places[c].fpfn = visible_pfn; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | + TTM_PL_FLAG_TOPDOWN; + c++; } - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; - if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) - placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN; + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) + places[c].lpfn = visible_pfn; + else + places[c].flags |= TTM_PL_FLAG_TOPDOWN; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GTT) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_TT; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; - } + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_CPU) { - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM | + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_SYSTEM; + if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + places[c].flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; - } else { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; - } + else + places[c].flags |= TTM_PL_FLAG_CACHED; + c++; } if (domain & AMDGPU_GEM_DOMAIN_GDS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GDS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS; + c++; } + if (domain & AMDGPU_GEM_DOMAIN_GWS) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_GWS; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS; + 
c++; } + if (domain & AMDGPU_GEM_DOMAIN_OA) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_FLAG_UNCACHED | - AMDGPU_PL_FLAG_OA; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA; + c++; } if (!c) { - placements[c].fpfn = 0; - placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + places[c].fpfn = 0; + places[c].lpfn = 0; + places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + c++; } + placement->num_placement = c; - placement->num_busy_placement = c; + placement->placement = places; - for (i = 0; i < c; i++) { - if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - (placements[i].flags & TTM_PL_FLAG_VRAM) && - !placements[i].fpfn) - placements[i].lpfn = - adev->mc.visible_vram_size >> PAGE_SHIFT; - else - placements[i].lpfn = 0; - } + placement->num_busy_placement = c; + placement->busy_placement = places; } void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) @@ -211,6 +224,69 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, bo->placement.busy_placement = bo->placements; } +/** + * amdgpu_bo_create_kernel - create BO for kernel use + * + * @adev: amdgpu device object + * @size: size for the new BO + * @align: alignment for the new BO + * @domain: where to place it + * @bo_ptr: resulting BO + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: optional CPU address mapping + * + * Allocates and pins a BO for kernel internal use. + * + * Returns 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr) +{ + int r; + + r = amdgpu_bo_create(adev, size, align, true, domain, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, bo_ptr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); + return r; + } + + r = amdgpu_bo_reserve(*bo_ptr, false); + if (r) { + dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r); + goto error_free; + } + + r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo pin failed\n", r); + goto error_unreserve; + } + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) kernel bo map failed\n", r); + goto error_unreserve; + } + } + + amdgpu_bo_unreserve(*bo_ptr); + + return 0; + +error_unreserve: + amdgpu_bo_unreserve(*bo_ptr); + +error_free: + amdgpu_bo_unref(bo_ptr); + + return r; +} + int amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, @@ -250,6 +326,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, } bo->adev = adev; INIT_LIST_HEAD(&bo->list); + INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT | @@ -277,11 +354,79 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, if (unlikely(r != 0)) { return r; } + + if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && + bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { + struct fence *fence; + + if (adev->mman.buffer_funcs_ring == NULL || + !adev->mman.buffer_funcs_ring->ready) { + r = -EBUSY; + goto fail_free; + } + + r = amdgpu_bo_reserve(bo, false); + if (unlikely(r != 0)) + goto fail_free; + + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (unlikely(r != 0)) + goto 
fail_unreserve; + + amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); + amdgpu_bo_fence(bo, fence, false); + amdgpu_bo_unreserve(bo); + fence_put(bo->tbo.moving); + bo->tbo.moving = fence_get(fence); + fence_put(fence); + } *bo_ptr = bo; trace_amdgpu_bo_create(bo); return 0; + +fail_unreserve: + amdgpu_bo_unreserve(bo); +fail_free: + amdgpu_bo_unref(&bo); + return r; +} + +static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, + unsigned long size, int byte_align, + struct amdgpu_bo *bo) +{ + struct ttm_placement placement = {0}; + struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; + + if (bo->shadow) + return 0; + + bo->flags |= AMDGPU_GEM_CREATE_SHADOW; + memset(&placements, 0, + (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); + + amdgpu_ttm_placement_init(adev, &placement, + placements, AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC); + + r = amdgpu_bo_create_restricted(adev, size, byte_align, true, + AMDGPU_GEM_DOMAIN_GTT, + AMDGPU_GEM_CREATE_CPU_GTT_USWC, + NULL, &placement, + bo->tbo.resv, + &bo->shadow); + if (!r) { + bo->shadow->parent = amdgpu_bo_ref(bo); + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&bo->shadow_list, &adev->shadow_list); + mutex_unlock(&adev->shadow_list_lock); + } + + return r; } int amdgpu_bo_create(struct amdgpu_device *adev, @@ -293,6 +438,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, { struct ttm_placement placement = {0}; struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; + int r; memset(&placements, 0, (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); @@ -300,9 +446,83 @@ int amdgpu_bo_create(struct amdgpu_device *adev, amdgpu_ttm_placement_init(adev, &placement, placements, domain, flags); - return amdgpu_bo_create_restricted(adev, size, byte_align, kernel, - domain, flags, sg, &placement, - resv, bo_ptr); + r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, + domain, flags, sg, &placement, + resv, bo_ptr); + if (r) + return r; + + if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) { + r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); + if (r) + amdgpu_bo_unref(bo_ptr); + } + + return r; +} + +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr, + amdgpu_bo_size(bo), resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; +} + +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct) + +{ + struct amdgpu_bo *shadow = bo->shadow; + uint64_t bo_addr, shadow_addr; + int r; + + if (!shadow) + return -EINVAL; + + bo_addr = amdgpu_bo_gpu_offset(bo); + shadow_addr = amdgpu_bo_gpu_offset(bo->shadow); + + r = reservation_object_reserve_shared(bo->tbo.resv); + if (r) + goto err; + + r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr, + amdgpu_bo_size(bo), resv, fence, + direct); + if (!r) + amdgpu_bo_fence(bo, *fence, true); + +err: + return r; } int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) @@ -380,16 +600,17 @@ int 
amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; if (bo->pin_count) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (domain != amdgpu_mem_type_to_domain(mem_type)) + return -EINVAL; + bo->pin_count++; if (gpu_addr) *gpu_addr = amdgpu_bo_gpu_offset(bo); if (max_offset != 0) { - u64 domain_start; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - domain_start = bo->adev->mc.vram_start; - else - domain_start = bo->adev->mc.gtt_start; + u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset; WARN_ON_ONCE(max_offset < (amdgpu_bo_gpu_offset(bo) - domain_start)); } @@ -401,7 +622,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* force to pin into visible video ram */ if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && - (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) { + (!max_offset || max_offset > + bo->adev->mc.visible_vram_size)) { if (WARN_ON_ONCE(min_offset > bo->adev->mc.visible_vram_size)) return -EINVAL; @@ -420,19 +642,23 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - bo->pin_count = 1; - if (gpu_addr != NULL) - *gpu_addr = amdgpu_bo_gpu_offset(bo); - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - bo->adev->vram_pin_size += amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size += amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size += amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p pin failed\n", bo); + goto error; + } + + bo->pin_count = 1; + if (gpu_addr != NULL) + *gpu_addr = amdgpu_bo_gpu_offset(bo); + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + bo->adev->vram_pin_size += amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size += amdgpu_bo_size(bo); + } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { + bo->adev->gart_pin_size += amdgpu_bo_size(bo); } + +error: return r; } @@ -457,16 +683,20 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); - if (likely(r == 0)) { - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - bo->adev->vram_pin_size -= amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) - bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); - } else - bo->adev->gart_pin_size -= amdgpu_bo_size(bo); - } else { + if (unlikely(r)) { dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); + goto error; + } + + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { + bo->adev->vram_pin_size -= amdgpu_bo_size(bo); + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); + } else { + bo->adev->gart_pin_size -= amdgpu_bo_size(bo); } + +error: return r; } @@ -637,7 +867,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) for (i = 0; i < abo->placement.num_placement; i++) { /* Force into visible VRAM */ if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) && - (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn)) + (!abo->placements[i].lpfn || + abo->placements[i].lpfn > lpfn)) abo->placements[i].lpfn = lpfn; } r = ttm_bo_validate(bo, &abo->placement, false, false); @@ -674,3 +905,21 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, else reservation_object_add_excl_fence(resv, fence); } + +/** + * amdgpu_bo_gpu_offset - return GPU offset of bo + * @bo: amdgpu 
object for which we query the offset + * + * Returns current GPU offset of the object. + * + * Note: object should either be pinned or reserved when calling this + * function, it might be useful to add check for this for debugging. + */ +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) +{ + WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); + WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && + !bo->pin_count); + + return bo->tbo.offset; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index bdb01d932548..b6a27390ef88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -85,21 +85,6 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) ttm_bo_unreserve(&bo->tbo); } -/** - * amdgpu_bo_gpu_offset - return GPU offset of bo - * @bo: amdgpu object for which we query the offset - * - * Returns current GPU offset of the object. - * - * Note: object should either be pinned or reserved when calling this - * function, it might be useful to add check for this for debugging. - */ -static inline u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) -{ - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); - return bo->tbo.offset; -} - static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) { return bo->tbo.num_pages << PAGE_SHIFT; @@ -139,6 +124,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct ttm_placement *placement, struct reservation_object *resv, struct amdgpu_bo **bo_ptr); +int amdgpu_bo_create_kernel(struct amdgpu_device *adev, + unsigned long size, int align, + u32 domain, struct amdgpu_bo **bo_ptr, + u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); void amdgpu_bo_kunmap(struct amdgpu_bo *bo); struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); @@ -165,6 +154,19 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence, bool shared); +u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, bool direct); +int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct reservation_object *resv, + struct fence **fence, + bool direct); + /* * sub allocation diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ff63b88b0ffa..d4ec3cb187a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -305,7 +305,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; char *table = NULL; - int size, i; + int size; if (adev->pp_enabled) size = amdgpu_dpm_get_pp_table(adev, &table); @@ -315,10 +315,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, if (size >= PAGE_SIZE) size = PAGE_SIZE - 1; - for (i = 0; i < size; i++) { - sprintf(buf + i, "%02x", table[i]); - } - sprintf(buf + i, "\n"); + memcpy(buf, table, size); return size; } @@ -1106,54 +1103,46 @@ force: void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) { + /* enable/disable UVD */ + mutex_lock(&adev->pm.mutex); 
amdgpu_dpm_powergate_uvd(adev, !enable); - else { - if (adev->pm.funcs->powergate_uvd) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - /* enable/disable UVD */ - amdgpu_dpm_powergate_uvd(adev, !enable); + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.uvd_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.uvd_active = false; + mutex_unlock(&adev->pm.mutex); } - + amdgpu_pm_compute_clocks(adev); } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { - if (adev->pp_enabled) + if (adev->pp_enabled || adev->pm.funcs->powergate_vce) { + /* enable/disable VCE */ + mutex_lock(&adev->pm.mutex); amdgpu_dpm_powergate_vce(adev, !enable); - else { - if (adev->pm.funcs->powergate_vce) { + mutex_unlock(&adev->pm.mutex); + } else { + if (enable) { mutex_lock(&adev->pm.mutex); - amdgpu_dpm_powergate_vce(adev, !enable); + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); } else { - if (enable) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; - mutex_unlock(&adev->pm.mutex); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.vce_active = false; - mutex_unlock(&adev->pm.mutex); - } - amdgpu_pm_compute_clocks(adev); + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.vce_active = false; + mutex_unlock(&adev->pm.mutex); } + amdgpu_pm_compute_clocks(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index c5738a22b690..545074479e1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,7 +52,9 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); - pp_init->powercontainment_enabled = amdgpu_powercontainment; + pp_init->rev_id = adev->pdev->revision; + pp_init->sub_sys_id = adev->pdev->subsystem_device; + pp_init->sub_vendor_id = adev->pdev->subsystem_vendor; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); @@ -106,11 +108,10 @@ static int amdgpu_pp_early_init(void *handle) break; case CHIP_TONGA: case CHIP_FIJI: - adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; - break; + case CHIP_TOPAZ: case CHIP_CARRIZO: case CHIP_STONEY: - adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; + adev->pp_enabled = (amdgpu_powerplay == 0) ? 
false : true; break; /* These chips don't have powerplay implemenations */ case CHIP_BONAIRE: @@ -118,7 +119,6 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_KABINI: case CHIP_MULLINS: case CHIP_KAVERI: - case CHIP_TOPAZ: default: adev->pp_enabled = false; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 85aeb0a804bb..242ba04bfde6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -222,33 +222,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Allocate ring buffer */ if (ring->ring_obj == NULL) { - r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, - NULL, NULL, &ring->ring_obj); + r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &ring->ring_obj, + &ring->gpu_addr, + (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); return r; } - r = amdgpu_bo_reserve(ring->ring_obj, false); - if (unlikely(r != 0)) - return r; - r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT, - &ring->gpu_addr); - if (r) { - amdgpu_bo_unreserve(ring->ring_obj); - dev_err(adev->dev, "(%d) ring pin failed\n", r); - return r; - } - r = amdgpu_bo_kmap(ring->ring_obj, - (void **)&ring->ring); - memset((void *)ring->ring, 0, ring->ring_size); - - amdgpu_bo_unreserve(ring->ring_obj); - if (r) { - dev_err(adev->dev, "(%d) ring map failed\n", r); - return r; - } } ring->ptr_mask = (ring->ring_size / 4) - 1; ring->max_dw = max_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 05a53f4fc334..b827c75e95de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(gtt_obj[i]); r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); @@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) amdgpu_bo_kunmap(vram_obj); r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr, - size, NULL, &fence); + size, NULL, &fence, false); if (r) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b7742e62972a..5447973483ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -34,6 +34,7 @@ #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> #include <ttm/ttm_page_alloc.h> +#include <ttm/ttm_memory.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include <linux/seq_file.h> @@ -74,7 +75,7 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) ttm_mem_global_release(ref->object); } -static int amdgpu_ttm_global_init(struct amdgpu_device *adev) +int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; @@ -256,10 +257,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, switch (old_mem->mem_type) { case TTM_PL_VRAM: - old_start += adev->mc.vram_start; - break; case TTM_PL_TT: - old_start += adev->mc.gtt_start; + old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -267,10 +266,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, } 
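The two amdgpu_move_blit() hunks here drop the hard-coded vram_start/gtt_start arithmetic in favour of the gpu_offset that TTM already records for each memory type. A minimal sketch of the resulting pattern (editor's illustration, not part of the patch; the helper name is invented and the usual kernel headers are assumed):

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>

/* Illustration only: convert a ttm_mem_reg to a GPU address using the
 * per-memory-type base that the driver fills in when it initialises
 * TTM_PL_VRAM and TTM_PL_TT.
 */
static u64 example_mem_to_gpu_addr(struct ttm_buffer_object *bo,
				   struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	return ((u64)mem->start << PAGE_SHIFT) + man->gpu_offset;
}

The same man[].gpu_offset value is what the reworked amdgpu_bo_pin_restricted() above uses to sanity-check max_offset, so both paths share one source of truth for the domain base address.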
switch (new_mem->mem_type) { case TTM_PL_VRAM: - new_start += adev->mc.vram_start; - break; case TTM_PL_TT: - new_start += adev->mc.gtt_start; + new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -285,7 +282,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ - bo->resv, &fence); + bo->resv, &fence, false); if (r) return r; @@ -335,7 +332,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -368,7 +365,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -435,8 +432,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } @@ -950,6 +946,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->lru[tbo->mem.mem_type]; lru->lru[tbo->mem.mem_type] = &tbo->lru; + while ((++lru)->lru[tbo->mem.mem_type] == res) + lru->lru[tbo->mem.mem_type] = &tbo->lru; return res; } @@ -960,6 +958,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) struct list_head *res = lru->swap_lru; lru->swap_lru = &tbo->swap; + while ((++lru)->swap_lru == res) + lru->swap_lru = &tbo->swap; return res; } @@ -987,10 +987,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) unsigned i, j; int r; - r = amdgpu_ttm_global_init(adev); - if (r) { - return r; - } /* No others user of address space so set it to 0 */ r = ttm_bo_device_init(&adev->mman.bdev, adev->mman.bo_global_ref.ref.object, @@ -1011,6 +1007,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) lru->swap_lru = &adev->mman.bdev.glob->swap_lru; } + for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) + adev->mman.guard.lru[j] = NULL; + adev->mman.guard.swap_lru = NULL; + adev->mman.initialized = true; r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->mc.real_vram_size >> PAGE_SHIFT); @@ -1151,7 +1151,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t dst_offset, uint32_t byte_count, struct reservation_object *resv, - struct fence **fence) + struct fence **fence, bool direct_submit) { struct amdgpu_device *adev = ring->adev; struct amdgpu_job *job; @@ -1195,8 +1195,79 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); + if (direct_submit) { + r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, + NULL, NULL, fence); + job->fence = fence_get(*fence); + if (r) + DRM_ERROR("Error scheduling IBs (%d)\n", r); + amdgpu_job_free(job); + } else { + r = amdgpu_job_submit(job, ring, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, fence); + if (r) + goto error_free; + } + + return r; + +error_free: + amdgpu_job_free(job); + return r; +} + +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence) +{ + struct amdgpu_device *adev = bo->adev; + 
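Just above, amdgpu_copy_buffer() gains a direct_submit flag: when it is set the IB is handed straight to amdgpu_ib_schedule() instead of being queued on the scheduler entity, and the new shadow backup/restore helpers expose the same choice through their direct parameter. A hedged usage sketch (the helper is hypothetical; it assumes amdgpu.h and that both BOs are already pinned, so amdgpu_bo_gpu_offset() is legal on them):

/* Editor's sketch, not part of the patch: copy the contents of src into dst
 * with the direct-submit path and wait for the blit to finish.
 */
static int example_blit_bo(struct amdgpu_device *adev,
			   struct amdgpu_bo *src, struct amdgpu_bo *dst)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct fence *fence = NULL;
	int r;

	r = amdgpu_copy_buffer(ring, amdgpu_bo_gpu_offset(src),
			       amdgpu_bo_gpu_offset(dst),
			       amdgpu_bo_size(src), src->tbo.resv, &fence,
			       true /* direct_submit: bypass the scheduler */);
	if (r)
		return r;

	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}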
struct amdgpu_job *job; + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + + uint32_t max_bytes, byte_count; + uint64_t dst_offset; + unsigned int num_loops, num_dw; + unsigned int i; + int r; + + byte_count = bo->tbo.num_pages << PAGE_SHIFT; + max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + num_loops = DIV_ROUND_UP(byte_count, max_bytes); + num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; + + /* for IB padding */ + while (num_dw & 0x7) + num_dw++; + + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); + if (r) + return r; + + if (resv) { + r = amdgpu_sync_resv(adev, &job->sync, resv, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + goto error_free; + } + } + + dst_offset = bo->tbo.mem.start << PAGE_SHIFT; + for (i = 0; i < num_loops; i++) { + uint32_t cur_size_in_bytes = min(byte_count, max_bytes); + + amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, + dst_offset, cur_size_in_bytes); + + dst_offset += cur_size_in_bytes; + byte_count -= cur_size_in_bytes; + } + + amdgpu_ring_pad_ib(ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); r = amdgpu_job_submit(job, ring, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); + AMDGPU_FENCE_OWNER_UNDEFINED, fence); if (r) goto error_free; @@ -1387,3 +1458,8 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) #endif } + +u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev) +{ + return ttm_get_kernel_zone_memory_size(adev->mman.mem_global_ref.object); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h new file mode 100644 index 000000000000..72f6bfc15d8f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -0,0 +1,80 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_TTM_H__ +#define __AMDGPU_TTM_H__ + +#include "gpu_scheduler.h" + +#define AMDGPU_PL_GDS TTM_PL_PRIV0 +#define AMDGPU_PL_GWS TTM_PL_PRIV1 +#define AMDGPU_PL_OA TTM_PL_PRIV2 + +#define AMDGPU_PL_FLAG_GDS TTM_PL_FLAG_PRIV0 +#define AMDGPU_PL_FLAG_GWS TTM_PL_FLAG_PRIV1 +#define AMDGPU_PL_FLAG_OA TTM_PL_FLAG_PRIV2 + +#define AMDGPU_TTM_LRU_SIZE 20 + +struct amdgpu_mman_lru { + struct list_head *lru[TTM_NUM_MEM_TYPES]; + struct list_head *swap_lru; +}; + +struct amdgpu_mman { + struct ttm_bo_global_ref bo_global_ref; + struct drm_global_reference mem_global_ref; + struct ttm_bo_device bdev; + bool mem_global_referenced; + bool initialized; + +#if defined(CONFIG_DEBUG_FS) + struct dentry *vram; + struct dentry *gtt; +#endif + + /* buffer handling */ + const struct amdgpu_buffer_funcs *buffer_funcs; + struct amdgpu_ring *buffer_funcs_ring; + /* Scheduler entity for buffer moves */ + struct amd_sched_entity entity; + + /* custom LRU management */ + struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE]; + /* guard for log2_size array, don't add anything in between */ + struct amdgpu_mman_lru guard; +}; + +int amdgpu_copy_buffer(struct amdgpu_ring *ring, + uint64_t src_offset, + uint64_t dst_offset, + uint32_t byte_count, + struct reservation_object *resv, + struct fence **fence, bool direct_submit); +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct reservation_object *resv, + struct fence **fence); + +int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8868d7..cc766cc53a87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -201,39 +201,14 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; - r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, - NULL, NULL, &adev->uvd.vcpu_bo); + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, + &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if (r) { - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r); - return r; - } - - r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM, - &adev->uvd.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - amdgpu_bo_unref(&adev->uvd.vcpu_bo); - dev_err(adev->dev, "(%d) UVD bo pin failed\n", r); - return r; - } - - r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) UVD map failed\n", r); - return r; - } - - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, @@ -323,7 +298,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.saved_bo) return -ENOMEM; - memcpy(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.saved_bo, ptr, size); return 0; } @@ -340,7 +315,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.cpu_addr; if (adev->uvd.saved_bo != NULL) { - memcpy(ptr, adev->uvd.saved_bo, size); + memcpy_toio(ptr, 
adev->uvd.saved_bo, size); kfree(adev->uvd.saved_bo); adev->uvd.saved_bo = NULL; } else { @@ -349,11 +324,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); + memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + memset_io(ptr, 0, size); } return 0; @@ -843,6 +818,7 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx, return r; break; case mmUVD_ENGINE_CNTL: + case mmUVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", reg); @@ -981,8 +957,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->ptr[3] = addr >> 32; ib->ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); ib->ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib->ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib->ptr[i] = PACKET0(mmUVD_NO_OP, 0); + ib->ptr[i+1] = 0; + } ib->length_dw = 16; if (direct) { @@ -1114,15 +1092,9 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned i, fences, handles = 0; - - fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) - ++handles; + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); - if (fences == 0 && handles == 0) { + if (fences == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 05865ce35351..da52af2a935a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -282,8 +282,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(cpu_addr, (adev->vce.fw->data) + offset, - (adev->vce.fw->size) - offset); + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); amdgpu_bo_kunmap(adev->vce.vcpu_bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e642fc48df4..bf56f1814437 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -51,19 +51,22 @@ * SI supports 16. */ -/* Special value that no flush is necessary */ -#define AMDGPU_VM_NO_FLUSH (~0ll) - /* Local structure. 
Encapsulate some VM table update parameters to reduce * the number of function parameters */ -struct amdgpu_vm_update_params { +struct amdgpu_pte_update_params { + /* amdgpu device we do this update for */ + struct amdgpu_device *adev; /* address where to copy page table entries from */ uint64_t src; - /* DMA addresses to use for mapping */ - dma_addr_t *pages_addr; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; + /* Function which actually does the update */ + void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe, + uint64_t addr, unsigned count, uint32_t incr, + uint32_t flags); + /* indicate update pt or its shadow */ + bool shadow; }; /** @@ -467,10 +470,9 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, } /** - * amdgpu_vm_update_pages - helper to call the right asic function + * amdgpu_vm_do_set_ptes - helper to call the right asic function * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update @@ -480,35 +482,47 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ -static void amdgpu_vm_update_pages(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, - uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, - uint32_t flags) +static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) { trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); - if (vm_update_params->src) { - amdgpu_vm_copy_pte(adev, vm_update_params->ib, - pe, (vm_update_params->src + (addr >> 12) * 8), count); - - } else if (vm_update_params->pages_addr) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, - vm_update_params->pages_addr, - pe, addr, count, incr, flags); - - } else if (count < 3) { - amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr, - count, incr, flags); + if (count < 3) { + amdgpu_vm_write_pte(params->adev, params->ib, pe, + addr | flags, count, incr); } else { - amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr, + amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, count, incr, flags); } } /** + * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART + * + * @params: see amdgpu_pte_update_params definition + * @pe: addr of the page entry + * @addr: dst addr to write into pe + * @count: number of page entries to update + * @incr: increase next addr by incr bytes + * @flags: hw access flags + * + * Traces the parameters and calls the DMA function to copy the PTEs. 
+ */ +static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint32_t flags) +{ + trace_amdgpu_vm_set_page(pe, addr, count, incr, flags); + + amdgpu_vm_copy_pte(params->adev, params->ib, pe, + (params->src + (addr >> 12) * 8), count); +} + +/** * amdgpu_vm_clear_bo - initially clear the page dir/table * * @adev: amdgpu_device pointer @@ -523,12 +537,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_ring *ring; struct fence *fence = NULL; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; unsigned entries; uint64_t addr; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); @@ -546,9 +559,10 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) goto error; - vm_update_params.ib = &job->ibs[0]; - amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries, - 0, 0); + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; + amdgpu_vm_do_set_ptes(¶ms, addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); @@ -577,55 +591,41 @@ error: * Look up the physical address of the page that the pte resolves * to and return the pointer for the page table entry. */ -uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) +static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; - if (pages_addr) { - /* page table offset */ - result = pages_addr[addr >> PAGE_SHIFT]; - - /* in case cpu page size != gpu page size*/ - result |= addr & (~PAGE_MASK); + /* page table offset */ + result = pages_addr[addr >> PAGE_SHIFT]; - } else { - /* No mapping required */ - result = addr; - } + /* in case cpu page size != gpu page size*/ + result |= addr & (~PAGE_MASK); result &= 0xFFFFFFFFFFFFF000ULL; return result; } -/** - * amdgpu_vm_update_pdes - make sure that page directory is valid - * - * @adev: amdgpu_device pointer - * @vm: requested vm - * @start: start of GPU address range - * @end: end of GPU address range - * - * Allocates new page tables if necessary - * and updates the page directory. - * Returns 0 for success, error for failure. - */ -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + bool shadow) { struct amdgpu_ring *ring; - struct amdgpu_bo *pd = vm->page_directory; - uint64_t pd_addr = amdgpu_bo_gpu_offset(pd); + struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow : + vm->page_directory; + uint64_t pd_addr; uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; uint64_t last_pde = ~0, last_pt = ~0; unsigned count = 0, pt_idx, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *fence = NULL; int r; - memset(&vm_update_params, 0, sizeof(vm_update_params)); + if (!pd) + return 0; + pd_addr = amdgpu_bo_gpu_offset(pd); ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); /* padding, etc. 
*/ @@ -638,7 +638,9 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, if (r) return r; - vm_update_params.ib = &job->ibs[0]; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.ib = &job->ibs[0]; /* walk over the address space and update the page directory */ for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { @@ -649,19 +651,25 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, continue; pt = amdgpu_bo_gpu_offset(bo); - if (vm->page_tables[pt_idx].addr == pt) - continue; - vm->page_tables[pt_idx].addr = pt; + if (!shadow) { + if (vm->page_tables[pt_idx].addr == pt) + continue; + vm->page_tables[pt_idx].addr = pt; + } else { + if (vm->page_tables[pt_idx].shadow_addr == pt) + continue; + vm->page_tables[pt_idx].shadow_addr = pt; + } pde = pd_addr + pt_idx * 8; if (((last_pde + 8 * count) != pde) || - ((last_pt + incr * count) != pt)) { + ((last_pt + incr * count) != pt) || + (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { if (count) { - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, - AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, + last_pt, count, incr, + AMDGPU_PTE_VALID); } count = 1; @@ -673,15 +681,14 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, } if (count) - amdgpu_vm_update_pages(adev, &vm_update_params, - last_pde, last_pt, - count, incr, AMDGPU_PTE_VALID); + amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, + count, incr, AMDGPU_PTE_VALID); - if (vm_update_params.ib->length_dw != 0) { - amdgpu_ring_pad_ib(ring, vm_update_params.ib); + if (params.ib->length_dw != 0) { + amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); - WARN_ON(vm_update_params.ib->length_dw > ndw); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) @@ -703,92 +710,33 @@ error_free: return r; } -/** - * amdgpu_vm_frag_ptes - add fragment information to PTEs +/* + * amdgpu_vm_update_pdes - make sure that page directory is valid * * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition - * @pe_start: first PTE to handle - * @pe_end: last PTE to handle - * @addr: addr those PTEs should point to - * @flags: hw mapping flags + * @vm: requested vm + * @start: start of GPU address range + * @end: end of GPU address range + * + * Allocates new page tables if necessary + * and updates the page directory. + * Returns 0 for success, error for failure. */ -static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, - uint64_t pe_start, uint64_t pe_end, - uint64_t addr, uint32_t flags) +int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, + struct amdgpu_vm *vm) { - /** - * The MC L1 TLB supports variable sized pages, based on a fragment - * field in the PTE. When this field is set to a non-zero value, page - * granularity is increased from 4KB to (1 << (12 + frag)). The PTE - * flags are considered valid for all PTEs within the fragment range - * and corresponding mappings are assumed to be physically contiguous. - * - * The L1 TLB can store a single PTE for the whole fragment, - * significantly increasing the space available for translation - * caching. This leads to large improvements in throughput when the - * TLB is under pressure. - * - * The L2 TLB distributes small and large fragments into two - * asymmetric partitions. The large fragment cache is significantly - * larger. 
Thus, we try to use large fragments wherever possible. - * Userspace can support this by aligning virtual base address and - * allocation size to the fragment size. - */ - - /* SI and newer are optimized for 64KB */ - uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB; - uint64_t frag_align = 0x80; - - uint64_t frag_start = ALIGN(pe_start, frag_align); - uint64_t frag_end = pe_end & ~(frag_align - 1); - - unsigned count; - - /* Abort early if there isn't anything to do */ - if (pe_start == pe_end) - return; - - /* system pages are non continuously */ - if (vm_update_params->src || vm_update_params->pages_addr || - !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) { - - count = (pe_end - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, - addr, count, AMDGPU_GPU_PAGE_SIZE, - flags); - return; - } - - /* handle the 4K area at the beginning */ - if (pe_start != frag_start) { - count = (frag_start - pe_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - addr += AMDGPU_GPU_PAGE_SIZE * count; - } - - /* handle the area in the middle */ - count = (frag_end - frag_start) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count, - AMDGPU_GPU_PAGE_SIZE, flags | frag_flags); + int r; - /* handle the 4K area at the end */ - if (frag_end != pe_end) { - addr += AMDGPU_GPU_PAGE_SIZE * count; - count = (pe_end - frag_end) / 8; - amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr, - count, AMDGPU_GPU_PAGE_SIZE, flags); - } + r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); + if (r) + return r; + return amdgpu_vm_update_pd_or_shadow(adev, vm, false); } /** * amdgpu_vm_update_ptes - make sure that page tables are valid * - * @adev: amdgpu_device pointer - * @vm_update_params: see amdgpu_vm_update_params definition + * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range @@ -797,16 +745,14 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * * Update the page tables in the range @start - @end. 
*/ -static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, - struct amdgpu_vm_update_params - *vm_update_params, +static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, uint64_t start, uint64_t end, uint64_t dst, uint32_t flags) { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t cur_pe_start, cur_nptes, cur_dst; uint64_t addr; /* next GPU address to be updated */ uint64_t pt_idx; struct amdgpu_bo *pt; @@ -817,7 +763,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, addr = start; pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; - + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else @@ -825,7 +775,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, cur_pe_start = amdgpu_bo_gpu_offset(pt); cur_pe_start += (addr & mask) * 8; - cur_pe_end = cur_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; /* for next ptb*/ @@ -836,6 +786,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, while (addr < end) { pt_idx = addr >> amdgpu_vm_block_size; pt = vm->page_tables[pt_idx].entry.robj; + if (params->shadow) { + if (!pt->shadow) + return; + pt = vm->page_tables[pt_idx].entry.robj->shadow; + } if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -845,19 +800,19 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; - if (cur_pe_end == next_pe_start) { + if ((cur_pe_start + 8 * cur_nptes) == next_pe_start && + ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) { /* The next ptb is consecutive to current ptb. - * Don't call amdgpu_vm_frag_ptes now. + * Don't call the update function now. * Will update two ptbs together in future. */ - cur_pe_end += 8 * nptes; + cur_nptes += nptes; } else { - amdgpu_vm_frag_ptes(adev, vm_update_params, - cur_pe_start, cur_pe_end, - cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); cur_pe_start = next_pe_start; - cur_pe_end = next_pe_start + 8 * nptes; + cur_nptes = nptes; cur_dst = dst; } @@ -866,8 +821,79 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, - cur_pe_end, cur_dst, flags); + params->func(params, cur_pe_start, cur_dst, cur_nptes, + AMDGPU_GPU_PAGE_SIZE, flags); +} + +/* + * amdgpu_vm_frag_ptes - add fragment information to PTEs + * + * @params: see amdgpu_pte_update_params definition + * @vm: requested vm + * @start: first PTE to handle + * @end: last PTE to handle + * @dst: addr those PTEs should point to + * @flags: hw mapping flags + */ +static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, + struct amdgpu_vm *vm, + uint64_t start, uint64_t end, + uint64_t dst, uint32_t flags) +{ + /** + * The MC L1 TLB supports variable sized pages, based on a fragment + * field in the PTE. When this field is set to a non-zero value, page + * granularity is increased from 4KB to (1 << (12 + frag)). The PTE + * flags are considered valid for all PTEs within the fragment range + * and corresponding mappings are assumed to be physically contiguous. + * + * The L1 TLB can store a single PTE for the whole fragment, + * significantly increasing the space available for translation + * caching. 
This leads to large improvements in throughput when the + * TLB is under pressure. + * + * The L2 TLB distributes small and large fragments into two + * asymmetric partitions. The large fragment cache is significantly + * larger. Thus, we try to use large fragments wherever possible. + * Userspace can support this by aligning virtual base address and + * allocation size to the fragment size. + */ + + const uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG; + + uint64_t frag_start = ALIGN(start, frag_align); + uint64_t frag_end = end & ~(frag_align - 1); + + uint32_t frag; + + /* system pages are non continuously */ + if (params->src || !(flags & AMDGPU_PTE_VALID) || + (frag_start >= frag_end)) { + + amdgpu_vm_update_ptes(params, vm, start, end, dst, flags); + return; + } + + /* use more than 64KB fragment size if possible */ + frag = lower_32_bits(frag_start | frag_end); + frag = likely(frag) ? __ffs(frag) : 31; + + /* handle the 4K area at the beginning */ + if (start != frag_start) { + amdgpu_vm_update_ptes(params, vm, start, frag_start, + dst, flags); + dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE; + } + + /* handle the area in the middle */ + amdgpu_vm_update_ptes(params, vm, frag_start, frag_end, dst, + flags | AMDGPU_PTE_FRAG(frag)); + + /* handle the 4K area at the end */ + if (frag_end != end) { + dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE; + amdgpu_vm_update_ptes(params, vm, frag_end, end, dst, flags); + } } /** @@ -900,14 +926,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; - struct amdgpu_vm_update_params vm_update_params; + struct amdgpu_pte_update_params params; struct fence *f = NULL; int r; + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.src = src; + ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); - memset(&vm_update_params, 0, sizeof(vm_update_params)); - vm_update_params.src = src; - vm_update_params.pages_addr = pages_addr; + + memset(¶ms, 0, sizeof(params)); + params.adev = adev; + params.src = src; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) @@ -924,30 +955,52 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* padding, etc. */ ndw = 64; - if (vm_update_params.src) { + if (src) { /* only copy commands needed */ ndw += ncmds * 7; - } else if (vm_update_params.pages_addr) { - /* header for write data commands */ - ndw += ncmds * 4; + params.func = amdgpu_vm_do_copy_ptes; + + } else if (pages_addr) { + /* copy commands needed */ + ndw += ncmds * 7; - /* body of write data command */ + /* and also PTEs */ ndw += nptes * 2; + params.func = amdgpu_vm_do_copy_ptes; + } else { /* set page commands needed */ ndw += ncmds * 10; /* two extra commands for begin/end of fragment */ ndw += 2 * 10; + + params.func = amdgpu_vm_do_set_ptes; } r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; - vm_update_params.ib = &job->ibs[0]; + params.ib = &job->ibs[0]; + + if (!src && pages_addr) { + uint64_t *pte; + unsigned i; + + /* Put the PTEs at the end of the IB. 
*/ + i = ndw - nptes * 2; + pte= (uint64_t *)&(job->ibs->ptr[i]); + params.src = job->ibs->gpu_addr + i * 4; + + for (i = 0; i < nptes; ++i) { + pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * + AMDGPU_GPU_PAGE_SIZE); + pte[i] |= flags; + } + } r = amdgpu_sync_fence(adev, &job->sync, exclusive); if (r) @@ -962,11 +1015,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (r) goto error_free; - amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start, - last + 1, addr, flags); + params.shadow = true; + amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); + params.shadow = false; + amdgpu_vm_frag_ptes(¶ms, vm, start, last + 1, addr, flags); - amdgpu_ring_pad_ib(ring, vm_update_params.ib); - WARN_ON(vm_update_params.ib->length_dw > ndw); + amdgpu_ring_pad_ib(ring, params.ib); + WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) @@ -1062,28 +1117,32 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object - * @mem: ttm mem + * @clear: if true clear the entries * * Fill in the page table entries for @bo_va. * Returns 0 for success, -EINVAL for failure. - * - * Object have to be reserved and mutex must be locked! */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - struct ttm_mem_reg *mem) + bool clear) { struct amdgpu_vm *vm = bo_va->vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct ttm_mem_reg *mem; struct fence *exclusive; uint64_t addr; int r; - if (mem) { + if (clear) { + mem = NULL; + addr = 0; + exclusive = NULL; + } else { struct ttm_dma_tt *ttm; + mem = &bo_va->bo->tbo.mem; addr = (u64)mem->start << PAGE_SHIFT; switch (mem->mem_type) { case TTM_PL_TT: @@ -1101,9 +1160,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); - } else { - addr = 0; - exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1134,7 +1190,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_lock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); list_del_init(&bo_va->vm_status); - if (!mem) + if (clear) list_add(&bo_va->vm_status, &vm->cleared); spin_unlock(&vm->status_lock); @@ -1197,7 +1253,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_bo_va, vm_status); spin_unlock(&vm->status_lock); - r = amdgpu_vm_bo_update(adev, bo_va, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, true); if (r) return r; @@ -1342,7 +1398,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, resv, &pt); if (r) goto error_free; @@ -1541,7 +1598,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) r = amdgpu_bo_create(adev, pd_size, align, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_NO_CPU_ACCESS, + AMDGPU_GEM_CREATE_NO_CPU_ACCESS | + AMDGPU_GEM_CREATE_SHADOW, NULL, NULL, &vm->page_directory); if (r) goto error_free_sched_entity; @@ -1597,10 +1655,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) kfree(mapping); } - for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) + for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { + if (vm->page_tables[i].entry.robj && + 
vm->page_tables[i].entry.robj->shadow) + amdgpu_bo_unref(&vm->page_tables[i].entry.robj->shadow); amdgpu_bo_unref(&vm->page_tables[i].entry.robj); + } drm_free_large(vm->page_tables); + if (vm->page_directory->shadow) + amdgpu_bo_unref(&vm->page_directory->shadow); amdgpu_bo_unref(&vm->page_directory); fence_put(vm->page_directory_fence); } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 7f85c2c1d681..f81068ba4cc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -88,7 +88,6 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan, /* timeout */ if (args.v2.ucReplyStatus == 1) { - DRM_DEBUG_KMS("dp_aux_ch timeout\n"); r = -ETIMEDOUT; goto done; } @@ -339,22 +338,21 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector) { struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux, DP_DPCD_REV, + msg, DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - amdgpu_atombios_dp_probe_oui(amdgpu_connector); + amdgpu_atombios_dp_probe_oui(amdgpu_connector); - return 0; - } + return 0; } + dig_connector->dpcd[0] = 0; return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index e2f0e5d58d5c..a0d63a293bb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5779,6 +5779,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: default: BUG(); } @@ -5873,7 +5874,10 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->pcie_dpm_key_disabled = 0; pi->thermal_sclk_dpm_enabled = 0; - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; pi->mclk_strobe_mode_threshold = 40000; pi->mclk_stutter_mode_threshold = 40000; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4efc901f658c..825de800b798 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -67,6 +67,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_powerplay.h" +#include "dce_virtual.h" /* * Indirect registers accessor @@ -1708,6 +1709,74 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1776,6 +1845,74 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 3, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kabini_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1844,6 +1981,74 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version mullins_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1912,6 +2117,74 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 3, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1980,32 +2253,128 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &cik_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &gfx_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &cik_sdma_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 4, + .minor = 2, + .rev = 0, + .funcs = &uvd_v4_2_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vce_v2_0_ip_funcs, + }, +}; + int cik_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_BONAIRE: - adev->ip_blocks = bonaire_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); - break; - case CHIP_HAWAII: - adev->ip_blocks = hawaii_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); - break; - case CHIP_KAVERI: - adev->ip_blocks = kaveri_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); - break; - case CHIP_KABINI: - adev->ip_blocks = kabini_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); - break; - case CHIP_MULLINS: - adev->ip_blocks = mullins_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_BONAIRE: + adev->ip_blocks = bonaire_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); + break; + case CHIP_HAWAII: + adev->ip_blocks = hawaii_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); + break; + case CHIP_KAVERI: + adev->ip_blocks = kaveri_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); + break; + case CHIP_KABINI: + adev->ip_blocks = kabini_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); + break; + case CHIP_MULLINS: + adev->ip_blocks = mullins_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ee6466912497..e71cd12104b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -694,24 +694,16 @@ static void 
cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -719,39 +711,27 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, - SDMA_WRITE_SUB_OPCODE_LINEAR, 0); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, + SDMA_WRITE_SUB_OPCODE_LINEAR, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -767,40 +747,21 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). 
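+ * Note: splitting large updates into multiple packets is now handled by the common VM code, so this helper emits a single packet for the range it is given.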
*/ -static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 2a11413ed54a..794c5f36ca68 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -435,7 +435,11 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->caps_td_ramping = true; pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->voting_clients = 0x00c00033; pi->auto_thermal_throttling_enabled = true; pi->bapm_enabled = false; @@ -2108,29 +2112,58 @@ static void cz_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) /* disable clockgating so we can properly shut down the block */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set clockgating state\n"); + return; + } + /* shutdown the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - /* XXX: check for errors */ + + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to set powergating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) + if (pi->caps_uvd_pg) { /* power off the UVD block */ - cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + ret = cz_send_msg_to_smc(adev, PPSMC_MSG_UVDPowerOFF); + if (ret) { + DRM_ERROR("UVD DPM Power Gating failed to send SMU PowerOFF message\n"); + return; + } + } } else { if (pi->caps_uvd_pg) { /* power on the UVD block */ if (pi->uvd_dynamic_pg) - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 1); else - cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + ret = cz_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_UVDPowerON, 0); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to send SMU PowerON message\n"); + return; + } + /* re-init 
the UVD block */ ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); + + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set powergating state\n"); + return; + } + /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); - /* XXX: check for errors */ + if (ret) { + DRM_ERROR("UVD DPM Power Gating Failed to set clockgating state\n"); + return; + } } cz_update_uvd_dpm(adev, gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index c1b04e9aab57..bc5bb4eb9625 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -646,8 +646,8 @@ static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, if (save->crtc_enabled[i]) { tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); + if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) { + tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0); WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); @@ -712,6 +712,45 @@ static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + num_crtc = 6; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v10_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v10_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v10_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2071,6 +2110,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2182,8 +2222,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2275,8 +2316,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic 
&& fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2698,7 +2739,7 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { .gamma_set = dce_v10_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v10_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2962,10 +3003,11 @@ static int dce_v10_0_early_init(void *handle) dce_v10_0_set_display_funcs(adev); dce_v10_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_FIJI: case CHIP_TONGA: - adev->mode_info.num_crtc = 6; /* XXX 7??? */ adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; @@ -3141,11 +3183,26 @@ static int dce_v10_0_wait_for_idle(void *handle) return 0; } +static int dce_v10_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (dce_v10_0_is_display_hung(adev)) + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true; + else + adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false; + + return 0; +} + static int dce_v10_0_soft_reset(void *handle) { u32 srbm_soft_reset = 0, tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) + return 0; + if (dce_v10_0_is_display_hung(adev)) srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; @@ -3512,6 +3569,7 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = { .resume = dce_v10_0_resume, .is_idle = dce_v10_0_is_idle, .wait_for_idle = dce_v10_0_wait_for_idle, + .check_soft_reset = dce_v10_0_check_soft_reset, .soft_reset = dce_v10_0_soft_reset, .set_clockgating_state = dce_v10_0_set_clockgating_state, .set_powergating_state = dce_v10_0_set_powergating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index 1bfa48ddd8a6..e3dc04d293e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v10_0_ip_funcs; +void dce_v10_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d4bf133908b1..b93eba077950 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -673,6 +673,53 @@ static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_CARRIZO: + num_crtc = 3; + break; + case CHIP_STONEY: + num_crtc = 2; + break; + case CHIP_POLARIS10: + num_crtc = 6; + break; + case CHIP_POLARIS11: + num_crtc = 5; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v11_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v11_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 
0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v11_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -2046,6 +2093,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -2157,8 +2205,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2250,8 +2299,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2708,7 +2757,7 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { .gamma_set = dce_v11_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v11_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2999,24 +3048,22 @@ static int dce_v11_0_early_init(void *handle) dce_v11_0_set_display_funcs(adev); dce_v11_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_STONEY: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; case CHIP_POLARIS10: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_POLARIS11: - adev->mode_info.num_crtc = 5; adev->mode_info.num_hpd = 5; adev->mode_info.num_dig = 5; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 84e4618f5253..1f58a65ba2ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v11_0_ip_funcs; +void dce_v11_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4fdfab1e9200..abd5213dfe18 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -604,6 +604,52 @@ static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev, WREG32(mmVGA_RENDER_CONTROL, tmp); } +static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev) +{ + int num_crtc = 0; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + num_crtc = 6; + break; + case CHIP_KAVERI: + num_crtc = 4; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + num_crtc = 2; + break; + default: + num_crtc = 0; + } + return num_crtc; +} + +void dce_v8_0_disable_dce(struct amdgpu_device *adev) +{ + /*Disable VGA render and enabled crtc, if has 
DCE engine*/ + if (amdgpu_atombios_has_dce_engine_info(adev)) { + u32 tmp; + int crtc_enabled, i; + + dce_v8_0_set_vga_render_state(adev, false); + + /*Disable crtc*/ + for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) { + crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), + CRTC_CONTROL, CRTC_MASTER_EN); + if (crtc_enabled) { + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); + tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); + WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); + WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } + } +} + static void dce_v8_0_program_fmt(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -1501,13 +1547,13 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) if (sad->format == eld_reg_to_type[i][1]) { if (sad->channels > max_channels) { - value = (sad->channels << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | - (sad->byte2 << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | - (sad->freq << - AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); - max_channels = sad->channels; + value = (sad->channels << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) | + (sad->byte2 << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) | + (sad->freq << + AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT); + max_channels = sad->channels; } if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) @@ -1613,7 +1659,7 @@ static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; uint32_t offset = dig->afmt->offset; - WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); + WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT)); WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz); WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT)); @@ -1693,6 +1739,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, /* Silent, r600_hdmi_enable will raise WARN for us */ if (!dig->afmt->enabled) return; + offset = dig->afmt->offset; /* hdmi deep color mode general control packets setup, if bpc > 8 */ @@ -1817,7 +1864,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset, HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */ - HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK); /* required for audio info values to be updated */ + HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */ WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset, (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */ @@ -1826,13 +1873,12 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset, AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */ - /* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001); WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001); - /* enable audio after to setting up hw */ + /* enable audio after setting up hw */ 
dce_v8_0_audio_enable(adev, dig->afmt->pin, true); } @@ -1952,6 +1998,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1999,7 +2046,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | - (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); + (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); #ifdef __BIG_ENDIAN fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); #endif @@ -2056,8 +2103,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -2137,8 +2185,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { amdgpu_fb = to_amdgpu_framebuffer(fb); @@ -2552,7 +2600,7 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { .gamma_set = dce_v8_0_crtc_gamma_set, .set_config = amdgpu_crtc_set_config, .destroy = dce_v8_0_crtc_destroy, - .page_flip = amdgpu_crtc_page_flip, + .page_flip_target = amdgpu_crtc_page_flip_target, }; static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -2653,7 +2701,7 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL2: /* disable the ppll */ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); + 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); break; case ATOM_PPLL0: /* disable the ppll */ @@ -2803,21 +2851,20 @@ static int dce_v8_0_early_init(void *handle) dce_v8_0_set_display_funcs(adev); dce_v8_0_set_irq_funcs(adev); + adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev); + switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: - adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; case CHIP_KAVERI: - adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 7; break; case CHIP_KABINI: case CHIP_MULLINS: - adev->mode_info.num_crtc = 2; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; /* ? 
*/ break; @@ -3236,7 +3283,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, drm_handle_vblank(adev->ddev, crtc); } DRM_DEBUG("IH: D%d vblank\n", crtc + 1); - break; case 1: /* vline */ if (disp_int & interrupt_status_offsets[crtc].vline) @@ -3245,7 +3291,6 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); DRM_DEBUG("IH: D%d vline\n", crtc + 1); - break; default: DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 77016852b252..7d0770c3a49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h @@ -26,4 +26,6 @@ extern const struct amd_ip_funcs dce_v8_0_ip_funcs; +void dce_v8_0_disable_dce(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c new file mode 100644 index 000000000000..00663a7b4053 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -0,0 +1,806 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "drmP.h" +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_i2c.h" +#include "atom.h" +#include "amdgpu_pll.h" +#include "amdgpu_connectors.h" +#ifdef CONFIG_DRM_AMDGPU_CIK +#include "dce_v8_0.h" +#endif +#include "dce_v10_0.h" +#include "dce_v11_0.h" +#include "dce_virtual.h" + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); + +/** + * dce_virtual_vblank_wait - vblank wait asic callback. + * + * @adev: amdgpu_device pointer + * @crtc: crtc to wait for vblank on + * + * Wait for vblank on the requested crtc (evergreen+). 
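+ * The virtual display has no real display hardware; vblank events come from a software timer, so there is nothing to wait for and this function is a stub.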
+ */ +static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc) +{ + return; +} + +static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + return 0; +} + +static void dce_virtual_page_flip(struct amdgpu_device *adev, + int crtc_id, u64 crtc_base, bool async) +{ + return; +} + +static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + *vbl = 0; + *position = 0; + + return -EINVAL; +} + +static bool dce_virtual_hpd_sense(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return true; +} + +static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev, + enum amdgpu_hpd_id hpd) +{ + return; +} + +static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev) +{ + return 0; +} + +static bool dce_virtual_is_display_hung(struct amdgpu_device *adev) +{ + return false; +} + +void dce_virtual_stop_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#ifdef CONFIG_DRM_AMDGPU_CIK + dce_v8_0_disable_dce(adev); +#endif + break; + case CHIP_FIJI: + case CHIP_TONGA: + dce_v10_0_disable_dce(adev); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + dce_v11_0_disable_dce(adev); + break; + case CHIP_TOPAZ: + /* no DCE */ + return; + default: + DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); + } + + return; +} +void dce_virtual_resume_mc_access(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) +{ + return; +} + +void dce_virtual_set_vga_render_state(struct amdgpu_device *adev, + bool render) +{ + return; +} + +/** + * dce_virtual_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line + * buffer allocation (CIK). 
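+ * There are no watermarks or line buffers to program for the virtual display, so this is a stub as well.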
+ */ +static void dce_virtual_bandwidth_update(struct amdgpu_device *adev) +{ + return; +} + +static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, + u16 *green, u16 *blue, uint32_t size) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int i; + + /* userspace palettes are always correct as is */ + for (i = 0; i < size; i++) { + amdgpu_crtc->lut_r[i] = red[i] >> 6; + amdgpu_crtc->lut_g[i] = green[i] >> 6; + amdgpu_crtc->lut_b[i] = blue[i] >> 6; + } + + return 0; +} + +static void dce_virtual_crtc_destroy(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(amdgpu_crtc); +} + +static const struct drm_crtc_funcs dce_virtual_crtc_funcs = { + .cursor_set2 = NULL, + .cursor_move = NULL, + .gamma_set = dce_virtual_crtc_gamma_set, + .set_config = amdgpu_crtc_set_config, + .destroy = dce_virtual_crtc_destroy, + .page_flip_target = amdgpu_crtc_page_flip_target, +}; + +static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct drm_device *dev = crtc->dev; + struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; + + switch (mode) { + case DRM_MODE_DPMS_ON: + amdgpu_crtc->enabled = true; + /* Make sure VBLANK and PFLIP interrupts are still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); + drm_vblank_on(dev, amdgpu_crtc->crtc_id); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + drm_vblank_off(dev, amdgpu_crtc->crtc_id); + amdgpu_crtc->enabled = false; + break; + } +} + + +static void dce_virtual_crtc_prepare(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + +static void dce_virtual_crtc_commit(struct drm_crtc *crtc) +{ + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void dce_virtual_crtc_disable(struct drm_crtc *crtc) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc->primary->fb) { + int r; + struct amdgpu_framebuffer *amdgpu_fb; + struct amdgpu_bo *rbo; + + amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) + DRM_ERROR("failed to reserve rbo before unpin\n"); + else { + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + } + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; +} + +static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + + /* update the hw version fpr dpm */ + amdgpu_crtc->hw_mode = *adjusted_mode; + + return 0; +} + +static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct drm_encoder *encoder; + + /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + amdgpu_crtc->encoder = encoder; + amdgpu_crtc->connector = 
amdgpu_get_connector_for_encoder(encoder); + break; + } + } + if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + return false; + } + + return true; +} + + +static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return 0; +} + +static void dce_virtual_crtc_load_lut(struct drm_crtc *crtc) +{ + return; +} + +static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int x, int y, enum mode_set_atomic state) +{ + return 0; +} + +static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = { + .dpms = dce_virtual_crtc_dpms, + .mode_fixup = dce_virtual_crtc_mode_fixup, + .mode_set = dce_virtual_crtc_mode_set, + .mode_set_base = dce_virtual_crtc_set_base, + .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic, + .prepare = dce_virtual_crtc_prepare, + .commit = dce_virtual_crtc_commit, + .load_lut = dce_virtual_crtc_load_lut, + .disable = dce_virtual_crtc_disable, +}; + +static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) +{ + struct amdgpu_crtc *amdgpu_crtc; + int i; + + amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + + (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); + if (amdgpu_crtc == NULL) + return -ENOMEM; + + drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_virtual_crtc_funcs); + + drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); + amdgpu_crtc->crtc_id = index; + adev->mode_info.crtcs[index] = amdgpu_crtc; + + for (i = 0; i < 256; i++) { + amdgpu_crtc->lut_r[i] = i << 2; + amdgpu_crtc->lut_g[i] = i << 2; + amdgpu_crtc->lut_b[i] = i << 2; + } + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); + + return 0; +} + +static int dce_virtual_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + dce_virtual_set_display_funcs(adev); + dce_virtual_set_irq_funcs(adev); + + adev->mode_info.num_crtc = 1; + adev->mode_info.num_hpd = 1; + adev->mode_info.num_dig = 1; + return 0; +} + +static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) +{ + struct amdgpu_i2c_bus_rec ddc_bus; + struct amdgpu_router router; + struct amdgpu_hpd hpd; + + /* look up gpio for ddc, hpd */ + ddc_bus.valid = false; + hpd.hpd = AMDGPU_HPD_NONE; + /* needed for aux chan transactions */ + ddc_bus.hpd = hpd.hpd; + + memset(&router, 0, sizeof(router)); + router.ddc_valid = false; + router.cd_valid = false; + amdgpu_display_add_connector(adev, + 0, + ATOM_DEVICE_CRT1_SUPPORT, + DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, + CONNECTOR_OBJECT_ID_VIRTUAL, + &hpd, + &router); + + amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, + ATOM_DEVICE_CRT1_SUPPORT, + 0); + + amdgpu_link_encoder_connector(adev->ddev); + + return true; +} + +static int dce_virtual_sw_init(void *handle) +{ + int r, i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq); + if (r) + return r; + + adev->ddev->max_vblank_count = 0; + + adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + adev->ddev->mode_config.preferred_depth = 24; + adev->ddev->mode_config.prefer_shadow = 1; + + 
adev->ddev->mode_config.fb_base = adev->mc.aper_base; + + r = amdgpu_modeset_create_props(adev); + if (r) + return r; + + adev->ddev->mode_config.max_width = 16384; + adev->ddev->mode_config.max_height = 16384; + + /* allocate crtcs */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = dce_virtual_crtc_init(adev, i); + if (r) + return r; + } + + dce_virtual_get_connector_info(adev); + amdgpu_print_display_setup(adev->ddev); + + drm_kms_helper_poll_init(adev->ddev); + + adev->mode_info.mode_config_initialized = true; + return 0; +} + +static int dce_virtual_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + kfree(adev->mode_info.bios_hardcoded_edid); + + drm_kms_helper_poll_fini(adev->ddev); + + drm_mode_config_cleanup(adev->ddev); + adev->mode_info.mode_config_initialized = false; + return 0; +} + +static int dce_virtual_hw_init(void *handle) +{ + return 0; +} + +static int dce_virtual_hw_fini(void *handle) +{ + return 0; +} + +static int dce_virtual_suspend(void *handle) +{ + return dce_virtual_hw_fini(handle); +} + +static int dce_virtual_resume(void *handle) +{ + int ret; + + ret = dce_virtual_hw_init(handle); + + return ret; +} + +static bool dce_virtual_is_idle(void *handle) +{ + return true; +} + +static int dce_virtual_wait_for_idle(void *handle) +{ + return 0; +} + +static int dce_virtual_soft_reset(void *handle) +{ + return 0; +} + +static int dce_virtual_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dce_virtual_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +const struct amd_ip_funcs dce_virtual_ip_funcs = { + .name = "dce_virtual", + .early_init = dce_virtual_early_init, + .late_init = NULL, + .sw_init = dce_virtual_sw_init, + .sw_fini = dce_virtual_sw_fini, + .hw_init = dce_virtual_hw_init, + .hw_fini = dce_virtual_hw_fini, + .suspend = dce_virtual_suspend, + .resume = dce_virtual_resume, + .is_idle = dce_virtual_is_idle, + .wait_for_idle = dce_virtual_wait_for_idle, + .soft_reset = dce_virtual_soft_reset, + .set_clockgating_state = dce_virtual_set_clockgating_state, + .set_powergating_state = dce_virtual_set_powergating_state, +}; + +/* these are handled by the primary encoders */ +static void dce_virtual_encoder_prepare(struct drm_encoder *encoder) +{ + return; +} + +static void dce_virtual_encoder_commit(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return; +} + +static void dce_virtual_encoder_disable(struct drm_encoder *encoder) +{ + return; +} + +static void +dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + return; +} + +static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + + /* set the active encoder to connector routing */ + amdgpu_encoder_set_active_device(encoder); + + return true; +} + +static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = { + .dpms = dce_virtual_encoder_dpms, + .mode_fixup = dce_virtual_encoder_mode_fixup, + .prepare = dce_virtual_encoder_prepare, + .mode_set = dce_virtual_encoder_mode_set, + .commit = dce_virtual_encoder_commit, + .disable = dce_virtual_encoder_disable, +}; + +static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) +{ + struct amdgpu_encoder *amdgpu_encoder = 
to_amdgpu_encoder(encoder); + + kfree(amdgpu_encoder->enc_priv); + drm_encoder_cleanup(encoder); + kfree(amdgpu_encoder); +} + +static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { + .destroy = dce_virtual_encoder_destroy, +}; + +static void dce_virtual_encoder_add(struct amdgpu_device *adev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) +{ + struct drm_device *dev = adev->ddev; + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + /* see if we already added it */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + amdgpu_encoder = to_amdgpu_encoder(encoder); + if (amdgpu_encoder->encoder_enum == encoder_enum) { + amdgpu_encoder->devices |= supported_device; + return; + } + + } + + /* add a new one */ + amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); + if (!amdgpu_encoder) + return; + + encoder = &amdgpu_encoder->base; + encoder->possible_crtcs = 0x1; + amdgpu_encoder->enc_priv = NULL; + amdgpu_encoder->encoder_enum = encoder_enum; + amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + amdgpu_encoder->devices = supported_device; + amdgpu_encoder->rmx_type = RMX_OFF; + amdgpu_encoder->underscan_type = UNDERSCAN_OFF; + amdgpu_encoder->is_ext_encoder = false; + amdgpu_encoder->caps = caps; + + drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); + DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); +} + +static const struct amdgpu_display_funcs dce_virtual_display_funcs = { + .set_vga_render_state = &dce_virtual_set_vga_render_state, + .bandwidth_update = &dce_virtual_bandwidth_update, + .vblank_get_counter = &dce_virtual_vblank_get_counter, + .vblank_wait = &dce_virtual_vblank_wait, + .is_display_hung = &dce_virtual_is_display_hung, + .backlight_set_level = NULL, + .backlight_get_level = NULL, + .hpd_sense = &dce_virtual_hpd_sense, + .hpd_set_polarity = &dce_virtual_hpd_set_polarity, + .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, + .page_flip = &dce_virtual_page_flip, + .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, + .add_encoder = &dce_virtual_encoder_add, + .add_connector = &amdgpu_connector_add, + .stop_mc_access = &dce_virtual_stop_mc_access, + .resume_mc_access = &dce_virtual_resume_mc_access, +}; + +static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) +{ + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dce_virtual_display_funcs; +} + +static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) +{ + struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); + struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); + unsigned crtc = 0; + drm_handle_vblank(adev->ddev, crtc); + dce_virtual_pageflip_irq(adev, NULL, NULL); + hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + return HRTIMER_NORESTART; +} + +static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, + int crtc, + enum amdgpu_interrupt_state state) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } + + if (state && !adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Enable software vsync timer\n"); + hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + 
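+ /* DCE_VIRTUAL_VBLANK_PERIOD is 16666666 ns (~16.7 ms per frame), so the timer emulates a ~60 Hz vblank */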
hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); + adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; + hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); + } else if (!state && adev->mode_info.vsync_timer_enabled) { + DRM_DEBUG("Disable software vsync timer\n"); + hrtimer_cancel(&adev->mode_info.vblank_timer); + } + + adev->mode_info.vsync_timer_enabled = state; + DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); +} + + +static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + switch (type) { + case AMDGPU_CRTC_IRQ_VBLANK1: + dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); + break; + default: + break; + } + return 0; +} + +static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, + int crtc) +{ + if (crtc >= adev->mode_info.num_crtc) { + DRM_DEBUG("invalid crtc %d\n", crtc); + return; + } +} + +static int dce_virtual_crtc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned crtc = 0; + unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; + + dce_virtual_crtc_vblank_int_ack(adev, crtc); + + if (amdgpu_irq_enabled(adev, source, irq_type)) { + drm_handle_vblank(adev->ddev, crtc); + } + dce_virtual_pageflip_irq(adev, NULL, NULL); + DRM_DEBUG("IH: D%d vblank\n", crtc + 1); + return 0; +} + +static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + if (type >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", type); + return -EINVAL; + } + DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); + + return 0; +} + +static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + unsigned long flags; + unsigned crtc_id = 0; + struct amdgpu_crtc *amdgpu_crtc; + struct amdgpu_flip_work *works; + + crtc_id = 0; + amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; + + if (crtc_id >= adev->mode_info.num_crtc) { + DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); + return -EINVAL; + } + + /* IRQ could occur when in initial stage */ + if (amdgpu_crtc == NULL) + return 0; + + spin_lock_irqsave(&adev->ddev->event_lock, flags); + works = amdgpu_crtc->pflip_works; + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " + "AMDGPU_FLIP_SUBMITTED(%d)\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED); + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + return 0; + } + + /* page flip completed. 
clean up */ + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + amdgpu_crtc->pflip_works = NULL; + + /* wakeup usersapce */ + if (works->event) + drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); + + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + + drm_crtc_vblank_put(&amdgpu_crtc->base); + schedule_work(&works->unpin_work); + + return 0; +} + +static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { + .set = dce_virtual_set_crtc_irq_state, + .process = dce_virtual_crtc_irq, +}; + +static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { + .set = dce_virtual_set_pageflip_irq_state, + .process = dce_virtual_pageflip_irq, +}; + +static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; + adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; + + adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; + adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h new file mode 100644 index 000000000000..e239243f6ebc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h @@ -0,0 +1,31 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __DCE_VIRTUAL_H__ +#define __DCE_VIRTUAL_H__ + +extern const struct amd_ip_funcs dce_virtual_ip_funcs; +#define DCE_VIRTUAL_VBLANK_PERIOD 16666666 + +#endif + diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index d869d058ef24..f4fbec3e224e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4465,24 +4465,21 @@ static int gfx_v7_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, - NULL, NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, - NULL, NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, - NULL, NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index bff8668e9e6d..c6a63c2f91e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -270,7 +270,8 @@ static const u32 tonga_mgcg_cgcg_init[] = static const u32 golden_settings_polaris11_a11[] = { - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, + mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -279,7 +280,7 @@ static const u32 golden_settings_polaris11_a11[] = mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, - mmSQ_CONFIG, 0x07f80000, 0x07180000, + mmSQ_CONFIG, 0x07f80000, 0x01180000, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3, @@ -301,8 +302,8 @@ static const u32 polaris11_golden_common_all[] = static const u32 golden_settings_polaris10_a11[] = { mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, - mmCB_HW_CONTROL_2, 0, 0x0f000000, + mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -409,6 +410,7 @@ static const u32 golden_settings_iceland_a11[] = mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002, mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, mmTCC_CTRL, 0x00100000, 0xf31fff7f, @@ -505,8 +507,10 @@ static const u32 cz_golden_settings_a11[] = mmGB_GPU_ID, 0x0000000f, 0x00000000, mmPA_SC_ENHANCE, 0xffffffff, 0x00000001, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c, mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd, 
mmTA_CNTL_AUX, 0x000f000f, 0x00010000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302 @@ -699,7 +703,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) polaris10_golden_common_all, (const u32)ARRAY_SIZE(polaris10_golden_common_all)); WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); - if (adev->pdev->revision == 0xc7) { + if (adev->pdev->revision == 0xc7 && + ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || + (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) || + (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1682))) { amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD); amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0); } @@ -1229,10 +1236,9 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) if (adev->gfx.rlc.clear_state_obj) { r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); if (unlikely(r != 0)) - dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } @@ -1244,7 +1250,6 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); adev->gfx.rlc.cp_table_obj = NULL; } @@ -1286,14 +1291,14 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.clear_state_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); - dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); if (r) { - dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); + dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); gfx_v8_0_rlc_fini(adev); return r; } @@ -1328,7 +1333,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) &adev->gfx.rlc.cp_table_gpu_addr); if (r) { amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); return r; } r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -1341,7 +1346,6 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); - } return 0; @@ -1357,7 +1361,6 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); - amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); adev->gfx.mec.hpd_eop_obj = NULL; } @@ -2078,24 +2081,21 @@ static int gfx_v8_0_sw_init(void *handle) } /* reserve GDS, GWS and OA resource for gfx */ - r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GDS, 0, NULL, - NULL, &adev->gds.gds_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, + PAGE_SIZE, 
AMDGPU_GEM_DOMAIN_GDS, + &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GWS, 0, NULL, - NULL, &adev->gds.gws_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; - r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_OA, 0, NULL, - NULL, &adev->gds.oa_gfx_bo); + r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; @@ -2123,9 +2123,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); gfx_v8_0_mec_fini(adev); - gfx_v8_0_rlc_fini(adev); - gfx_v8_0_free_microcode(adev); return 0; @@ -3461,19 +3459,16 @@ static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, else data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + if (se_num == 0xffffffff) data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (se_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - } else if (sh_num == 0xffffffff) { - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } else { + + if (sh_num == 0xffffffff) + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); + else data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); - } + WREG32(mmGRBM_GFX_INDEX, data); } @@ -3486,11 +3481,10 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_RB_BACKEND_DISABLE); - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_backends_per_se / adev->gfx.config.max_sh_per_se); @@ -3572,16 +3566,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) u32 tmp; int i; - tmp = RREG32(mmGRBM_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); - WREG32(mmGRBM_CNTL, tmp); - + WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF); WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); gfx_v8_0_tiling_mode_table_init(adev); - gfx_v8_0_setup_rb(adev); gfx_v8_0_get_cu_info(adev); @@ -3765,9 +3755,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) sizeof(indirect_start_offsets)/sizeof(int)); /* save and restore list */ - temp = RREG32(mmRLC_SRM_CNTL); - temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; - WREG32(mmRLC_SRM_CNTL, temp); + WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1); WREG32(mmRLC_SRM_ARAM_ADDR, 0); for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) @@ -3804,11 +3792,7 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev) static void 
gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) { - uint32_t data; - - data = RREG32(mmRLC_SRM_CNTL); - data |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(mmRLC_SRM_CNTL, data); + WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1); } static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) @@ -3818,75 +3802,34 @@ static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG)) { - data = RREG32(mmCP_RB_WPTR_POLL_CNTL); - data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; - data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); - WREG32(mmCP_RB_WPTR_POLL_CNTL, data); - - data = 0; - data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); - data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY, data); + WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60); - data = RREG32(mmRLC_PG_DELAY_2); - data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; - data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); - WREG32(mmRLC_PG_DELAY_2, data); + data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10); + data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10); + WREG32(mmRLC_PG_DELAY, data); - data = RREG32(mmRLC_AUTO_PG_CTRL); - data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; - data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); - WREG32(mmRLC_AUTO_PG_CTRL, data); + WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3); + WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0); } } static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0); } static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0); } static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; - else - data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 
1 : 0); } static void gfx_v8_0_init_pg(struct amdgpu_device *adev) @@ -3925,34 +3868,24 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0); gfx_v8_0_enable_gui_idle_interrupt(adev, false); - gfx_v8_0_wait_for_rlc_serdes(adev); } static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmGRBM_SOFT_RESET); - - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - WREG32(mmGRBM_SOFT_RESET, tmp); + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - WREG32(mmGRBM_SOFT_RESET, tmp); + + WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) { - u32 tmp = RREG32(mmRLC_CNTL); - - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); - WREG32(mmRLC_CNTL, tmp); + WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1); /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) @@ -3994,14 +3927,13 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) /* disable CG */ WREG32(mmRLC_CGCG_CGLS_CTRL, 0); if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10) + adev->asic_type == CHIP_POLARIS10) WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0); /* disable PG */ WREG32(mmRLC_PG_CNTL, 0); gfx_v8_0_rlc_reset(adev); - gfx_v8_0_init_pg(adev); if (!adev->pp_enabled) { @@ -4296,12 +4228,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) gfx_v8_0_cp_gfx_start(adev); ring->ready = true; r = amdgpu_ring_test_ring(ring); - if (r) { + if (r) ring->ready = false; - return r; - } - return 0; + return r; } static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) @@ -4976,7 +4906,6 @@ static int gfx_v8_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); - gfx_v8_0_gpu_init(adev); r = gfx_v8_0_rlc_resume(adev); @@ -4984,8 +4913,6 @@ static int gfx_v8_0_hw_init(void *handle) return r; r = gfx_v8_0_cp_resume(adev); - if (r) - return r; return r; } @@ -5033,25 +4960,22 @@ static bool gfx_v8_0_is_idle(void *handle) static int gfx_v8_0_wait_for_idle(void *handle) { unsigned i; - u32 tmp; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; - - if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) + if (gfx_v8_0_is_idle(handle)) return 0; + udelay(1); } return -ETIMEDOUT; } -static int gfx_v8_0_soft_reset(void *handle) +static int gfx_v8_0_check_soft_reset(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 grbm_soft_reset = 0, srbm_soft_reset = 0; u32 tmp; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* GRBM_STATUS */ tmp = RREG32(mmGRBM_STATUS); @@ -5060,16 +4984,12 @@ static int gfx_v8_0_soft_reset(void *handle) GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | - GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { + GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK | + GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { grbm_soft_reset = 
REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); - } - - if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { - grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, - GRBM_SOFT_RESET, SOFT_RESET_CP, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); } @@ -5080,73 +5000,199 @@ static int gfx_v8_0_soft_reset(void *handle) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); + if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) || + REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) { + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPF, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPC, 1); + grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, + SOFT_RESET_CPG, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, + SOFT_RESET_GRBM, 1); + } + /* SRBM_STATUS */ tmp = RREG32(mmSRBM_STATUS); if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); + if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, + SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); if (grbm_soft_reset || srbm_soft_reset) { - /* stop the rlc */ - gfx_v8_0_rlc_stop(adev); + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true; + adev->gfx.grbm_soft_reset = grbm_soft_reset; + adev->gfx.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false; + adev->gfx.grbm_soft_reset = 0; + adev->gfx.srbm_soft_reset = 0; + } + + return 0; +} + +static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + int i; + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { + u32 tmp; + tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); + tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST, + DEQUEUE_REQ, 2); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + udelay(1); + } + } +} + +static int gfx_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + /* stop the rlc */ + gfx_v8_0_rlc_stop(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) /* Disable GFX parsing/prefetching */ gfx_v8_0_cp_gfx_enable(adev, false); + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_inactive_hqd(adev, ring); + } /* Disable MEC parsing/prefetching */ gfx_v8_0_cp_compute_enable(adev, false); + } - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); 
- tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 1); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 1); - WREG32(mmGMCON_DEBUG, tmp); + return 0; +} - udelay(50); - } +static int gfx_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + u32 tmp; - if (grbm_soft_reset) { - tmp = RREG32(mmGRBM_SOFT_RESET); - tmp |= grbm_soft_reset; - dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; - udelay(50); + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; - tmp &= ~grbm_soft_reset; - WREG32(mmGRBM_SOFT_RESET, tmp); - tmp = RREG32(mmGRBM_SOFT_RESET); - } + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1); + WREG32(mmGMCON_DEBUG, tmp); + udelay(50); + } - if (srbm_soft_reset) { - tmp = RREG32(mmSRBM_SOFT_RESET); - tmp |= srbm_soft_reset; - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); + if (grbm_soft_reset) { + tmp = RREG32(mmGRBM_SOFT_RESET); + tmp |= grbm_soft_reset; + dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); - udelay(50); + udelay(50); - tmp &= ~srbm_soft_reset; - WREG32(mmSRBM_SOFT_RESET, tmp); - tmp = RREG32(mmSRBM_SOFT_RESET); - } + tmp &= ~grbm_soft_reset; + WREG32(mmGRBM_SOFT_RESET, tmp); + tmp = RREG32(mmGRBM_SOFT_RESET); + } - if (grbm_soft_reset || srbm_soft_reset) { - tmp = RREG32(mmGMCON_DEBUG); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_STALL, 0); - tmp = REG_SET_FIELD(tmp, - GMCON_DEBUG, GFX_CLEAR, 0); - WREG32(mmGMCON_DEBUG, tmp); - } + if (srbm_soft_reset) { + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); - /* Wait a little for things to settle down */ udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + } + + if (grbm_soft_reset || srbm_soft_reset) { + tmp = RREG32(mmGMCON_DEBUG); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0); + tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0); + WREG32(mmGMCON_DEBUG, tmp); + } + + /* Wait a little for things to settle down */ + udelay(50); + + return 0; +} + +static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); + WREG32(mmCP_HQD_PQ_RPTR, 0); + WREG32(mmCP_HQD_PQ_WPTR, 0); + vi_srbm_select(adev, 0, 0, 0, 0); +} + +static int gfx_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 grbm_soft_reset = 0, srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang) + return 0; + + grbm_soft_reset = adev->gfx.grbm_soft_reset; + srbm_soft_reset = adev->gfx.srbm_soft_reset; + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) + gfx_v8_0_cp_gfx_resume(adev); + + if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, 
SOFT_RESET_CPF) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) || + REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) { + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; + + gfx_v8_0_init_hqd(adev, ring); + } + gfx_v8_0_cp_compute_resume(adev); } + gfx_v8_0_rlc_start(adev); + return 0; } @@ -5265,8 +5311,6 @@ static int gfx_v8_0_late_init(void *handle) static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - if (adev->asic_type == CHIP_POLARIS11) /* Send msg to SMU via Powerplay */ amdgpu_set_powergating_state(adev, @@ -5274,83 +5318,35 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable static MGPG */ - if (enable) - data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable dynamic MGPG */ - if (enable) - data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, bool enable) { - uint32_t data, temp; - - temp = data = RREG32(mmRLC_PG_CNTL); - /* Enable quick PG */ - if (enable) - data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0); } static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, bool enable) { - u32 data, orig; - - orig = data = RREG32(mmRLC_PG_CNTL); - - if (enable) - data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - else - data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; - - if (orig != data) - WREG32(mmRLC_PG_CNTL, data); + WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0); /* Read any GFX register to wake up GFX. 
*/ if (!enable) - data = RREG32(mmDB_RENDER_CONTROL); + RREG32(mmDB_RENDER_CONTROL); } static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, @@ -5426,15 +5422,15 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, data = RREG32(mmRLC_SERDES_WR_CTRL); if (adev->asic_type == CHIP_STONEY) - data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | - RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | - RLC_SERDES_WR_CTRL__P1_SELECT_MASK | - RLC_SERDES_WR_CTRL__P2_SELECT_MASK | - RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | - RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | - RLC_SERDES_WR_CTRL__POWER_UP_MASK | - RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | - RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); + data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | + RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | + RLC_SERDES_WR_CTRL__P1_SELECT_MASK | + RLC_SERDES_WR_CTRL__P2_SELECT_MASK | + RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK | + RLC_SERDES_WR_CTRL__POWER_DOWN_MASK | + RLC_SERDES_WR_CTRL__POWER_UP_MASK | + RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK | + RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK); else data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK | RLC_SERDES_WR_CTRL__READ_COMMAND_MASK | @@ -5457,10 +5453,10 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define MSG_ENTER_RLC_SAFE_MODE 1 #define MSG_EXIT_RLC_SAFE_MODE 0 - -#define RLC_GPR_REG2__REQ_MASK 0x00000001 -#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 -#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e +#define RLC_GPR_REG2__REQ_MASK 0x00000001 +#define RLC_GPR_REG2__REQ__SHIFT 0 +#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 +#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) { @@ -5490,7 +5486,7 @@ static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5518,7 +5514,7 @@ static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) break; udelay(1); } @@ -5550,7 +5546,7 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5577,7 +5573,7 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0) + if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) break; udelay(1); } @@ -5618,21 +5614,12 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* It is disabled by HW by default */ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) /* 1 - RLC memory Light sleep */ - temp = data = RREG32(mmRLC_MEM_SLP_CNTL); - data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmRLC_MEM_SLP_CNTL, data); - } + WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1); - if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { - /* 2 - CP memory Light sleep */ - temp = data = RREG32(mmCP_MEM_SLP_CNTL); - 
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; - if (temp != data) - WREG32(mmCP_MEM_SLP_CNTL, data); - } + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) + WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1); } /* 3 - RLC_CGTT_MGCG_OVERRIDE */ @@ -5850,25 +5837,18 @@ static int gfx_v8_0_set_clockgating_state(void *handle, static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) { - u32 rptr; - - rptr = ring->adev->wb.wb[ring->rptr_offs]; - - return rptr; + return ring->adev->wb.wb[ring->rptr_offs]; } static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 wptr; if (ring->use_doorbell) /* XXX check if swapping is necessary on BE */ - wptr = ring->adev->wb.wb[ring->wptr_offs]; + return ring->adev->wb.wb[ring->wptr_offs]; else - wptr = RREG32(mmCP_RB0_WPTR); - - return wptr; + return RREG32(mmCP_RB0_WPTR); } static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) @@ -5967,9 +5947,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN - (2 << 0) | + (2 << 0) | #endif - (ib->gpu_addr & 0xFFFFFFFC)); + (ib->gpu_addr & 0xFFFFFFFC)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); amdgpu_ring_write(ring, control); } @@ -6114,33 +6094,14 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = - REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); } static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, int me, int pipe, enum amdgpu_interrupt_state state) { - u32 mec_int_cntl, mec_int_cntl_reg; - /* * amdgpu controls only pipe 0 of MEC1. That's why this function only * handles the setting of interrupts for this specific pipe. All other @@ -6150,7 +6111,6 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, if (me == 1) { switch (pipe) { case 0: - mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; break; default: DRM_DEBUG("invalid pipe %d\n", pipe); @@ -6161,22 +6121,8 @@ static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, return; } - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); - mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, - TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 
0 : 1); } static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, @@ -6184,24 +6130,8 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } @@ -6211,24 +6141,8 @@ static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 0); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 1); - WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); - break; - default: - break; - } + WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); return 0; } @@ -6334,7 +6248,10 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .resume = gfx_v8_0_resume, .is_idle = gfx_v8_0_is_idle, .wait_for_idle = gfx_v8_0_wait_for_idle, + .check_soft_reset = gfx_v8_0_check_soft_reset, + .pre_soft_reset = gfx_v8_0_pre_soft_reset, .soft_reset = gfx_v8_0_soft_reset, + .post_soft_reset = gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, }; @@ -6475,15 +6392,12 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); mask = gfx_v8_0_create_bitmask(adev->gfx.config.max_cu_per_sh); - return (~data) & mask; + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index bc82c794312c..ebed1f829297 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,6 +26,4 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index d24a82bd0c7a..aa0c4b964621 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -144,6 +144,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: return 0; default: BUG(); } @@ -182,7 +183,7 @@ static int 
gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -202,11 +203,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -238,9 +234,6 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -392,7 +385,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. */ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -952,6 +945,11 @@ static int gmc_v7_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v7_0_mc_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 717359d3ba8c..84c10d5117a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -103,6 +103,11 @@ static const u32 stoney_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; +static const u32 golden_settings_stoney_common[] = +{ + mmMC_HUB_RDREQ_UVD, MC_HUB_RDREQ_UVD__PRESCALE_MASK, 0x00000004, + mmMC_RD_GRP_OTH, MC_RD_GRP_OTH__UVD_MASK, 0x00600000 +}; static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) { @@ -142,6 +147,9 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, stoney_mgcg_cgcg_init, (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_stoney_common, + (const u32)ARRAY_SIZE(golden_settings_stoney_common)); break; default: break; @@ -253,7 +261,7 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) const struct mc_firmware_header_v1_0 *hdr; const __le32 *fw_data = NULL; const __le32 *io_mc_regs = NULL; - u32 running, blackout = 0; + u32 running; int i, ucode_size, regs_size; if (!adev->mc.fw) @@ -279,11 +287,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); if (running == 0) { - if (running) { - blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); @@ -315,9 +318,6 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) break; udelay(1); } - - if (running) - WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -469,7 +469,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) * size equal to the 1024 or vram, whichever is larger. 
*/ if (amdgpu_gart_size == -1) - adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); + adev->mc.gtt_size = amdgpu_ttm_get_gtt_mem_size(adev); else adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; @@ -949,6 +949,11 @@ static int gmc_v8_0_sw_init(void *handle) return r; } + r = amdgpu_ttm_global_init(adev); + if (r) { + return r; + } + r = gmc_v8_0_mc_init(adev); if (r) return r; @@ -1092,9 +1097,8 @@ static int gmc_v8_0_wait_for_idle(void *handle) } -static int gmc_v8_0_soft_reset(void *handle) +static int gmc_v8_0_check_soft_reset(void *handle) { - struct amdgpu_mode_mc_save save; u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 tmp = RREG32(mmSRBM_STATUS); @@ -1109,13 +1113,42 @@ static int gmc_v8_0_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); } - if (srbm_soft_reset) { - gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle((void *)adev)) { - dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); - } + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true; + adev->mc.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false; + adev->mc.srbm_soft_reset = 0; + } + return 0; +} + +static int gmc_v8_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + gmc_v8_0_mc_stop(adev, &adev->mc.save); + if (gmc_v8_0_wait_for_idle(adev)) { + dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); + } + + return 0; +} + +static int gmc_v8_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + srbm_soft_reset = adev->mc.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; @@ -1131,14 +1164,22 @@ static int gmc_v8_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - - gmc_v8_0_mc_resume(adev, &save); - udelay(50); } return 0; } +static int gmc_v8_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang) + return 0; + + gmc_v8_0_mc_resume(adev, &adev->mc.save); + return 0; +} + static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -1406,7 +1447,10 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = { .resume = gmc_v8_0_resume, .is_idle = gmc_v8_0_is_idle, .wait_for_idle = gmc_v8_0_wait_for_idle, + .check_soft_reset = gmc_v8_0_check_soft_reset, + .pre_soft_reset = gmc_v8_0_pre_soft_reset, .soft_reset = gmc_v8_0_soft_reset, + .post_soft_reset = gmc_v8_0_post_soft_reset, .set_clockgating_state = gmc_v8_0_set_clockgating_state, .set_powergating_state = gmc_v8_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a845e883f5fa..f8618a3881a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2845,7 +2845,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->caps_tcp_ramping = true; } - pi->caps_sclk_ds = true; + if (amdgpu_sclk_deep_sleep_en) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (amdgpu_bapm == 0) diff --git 
a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 1351c7e834a2..e82229686783 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -749,24 +749,16 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -774,39 +766,27 @@ static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). */ -static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) +static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = pe; + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -822,40 +802,21 @@ static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). 
*/ -static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 653ce5ed55ae..bee4978bec73 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -976,24 +976,16 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, uint64_t pe, uint64_t src, unsigned count) { - while (count) { - unsigned bytes = count * 8; - if (bytes > 0x1FFFF8) - bytes = 0x1FFFF8; - - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = bytes; - ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ - ib->ptr[ib->length_dw++] = lower_32_bits(src); - ib->ptr[ib->length_dw++] = upper_32_bits(src); - ib->ptr[ib->length_dw++] = lower_32_bits(pe); - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - - pe += bytes; - src += bytes; - count -= bytes / 8; - } + unsigned bytes = count * 8; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = bytes; + ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ + ib->ptr[ib->length_dw++] = lower_32_bits(src); + ib->ptr[ib->length_dw++] = upper_32_bits(src); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); } /** @@ -1001,39 +993,27 @@ static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry - * @addr: dst addr to write into pe + * @value: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes - * @flags: access flags * * Update PTEs by writing them manually using sDMA (CIK). 
*/ -static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, - const dma_addr_t *pages_addr, uint64_t pe, - uint64_t addr, unsigned count, - uint32_t incr, uint32_t flags) -{ - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count * 2; - if (ndw > 0xFFFFE) - ndw = 0xFFFFE; - - /* for non-physically contiguous pages (system) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); - ib->ptr[ib->length_dw++] = pe; - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = ndw; - for (; ndw > 0; ndw -= 2, --count, pe += 8) { - value = amdgpu_vm_map_gart(pages_addr, addr); - addr += incr; - value |= flags; - ib->ptr[ib->length_dw++] = value; - ib->ptr[ib->length_dw++] = upper_32_bits(value); - } +static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, + uint64_t value, unsigned count, + uint32_t incr) +{ + unsigned ndw = count * 2; + + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = ndw; + for (; ndw > 0; ndw -= 2, --count, pe += 8) { + ib->ptr[ib->length_dw++] = lower_32_bits(value); + ib->ptr[ib->length_dw++] = upper_32_bits(value); + value += incr; } } @@ -1049,40 +1029,21 @@ static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, * * Update the page tables using sDMA (CIK). */ -static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, - uint64_t pe, +static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint32_t flags) { - uint64_t value; - unsigned ndw; - - while (count) { - ndw = count; - if (ndw > 0x7FFFF) - ndw = 0x7FFFF; - - if (flags & AMDGPU_PTE_VALID) - value = addr; - else - value = 0; - - /* for physically contiguous pages (vram) */ - ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); - ib->ptr[ib->length_dw++] = pe; /* dst addr */ - ib->ptr[ib->length_dw++] = upper_32_bits(pe); - ib->ptr[ib->length_dw++] = flags; /* mask */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = value; /* value */ - ib->ptr[ib->length_dw++] = upper_32_bits(value); - ib->ptr[ib->length_dw++] = incr; /* increment size */ - ib->ptr[ib->length_dw++] = 0; - ib->ptr[ib->length_dw++] = ndw; /* number of entries */ - - pe += ndw * 8; - addr += ndw * incr; - count -= ndw; - } + /* for physically contiguous pages (vram) */ + ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); + ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ + ib->ptr[ib->length_dw++] = upper_32_bits(pe); + ib->ptr[ib->length_dw++] = flags; /* mask */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */ + ib->ptr[ib->length_dw++] = upper_32_bits(addr); + ib->ptr[ib->length_dw++] = incr; /* increment size */ + ib->ptr[ib->length_dw++] = 0; + ib->ptr[ib->length_dw++] = count; /* number of entries */ } /** @@ -1320,28 +1281,79 @@ static int sdma_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int sdma_v3_0_soft_reset(void *handle) +static int sdma_v3_0_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS2); - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { - /* sdma0 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, 
SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); + if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) || + (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) { srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; - } - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { - /* sdma1 */ - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); - tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; } if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true; + adev->sdma.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false; + adev->sdma.srbm_soft_reset = 0; + } + + return 0; +} + +static int sdma_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); + } + + return 0; +} + +static int sdma_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) || + REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) { + sdma_v3_0_gfx_resume(adev); + sdma_v3_0_rlc_resume(adev); + } + + return 0; +} + +static int sdma_v3_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang) + return 0; + + srbm_soft_reset = adev->sdma.srbm_soft_reset; + + if (srbm_soft_reset) { tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -1559,6 +1571,9 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { .resume = sdma_v3_0_resume, .is_idle = sdma_v3_0_is_idle, .wait_for_idle = sdma_v3_0_wait_for_idle, + .check_soft_reset = sdma_v3_0_check_soft_reset, + .pre_soft_reset = sdma_v3_0_pre_soft_reset, + .post_soft_reset = sdma_v3_0_post_soft_reset, .soft_reset = sdma_v3_0_soft_reset, .set_clockgating_state = sdma_v3_0_set_clockgating_state, .set_powergating_state = sdma_v3_0_set_powergating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index c92055805a45..d127d59f953a 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -373,10 +373,10 @@ static int tonga_ih_wait_for_idle(void *handle) return -ETIMEDOUT; } -static int tonga_ih_soft_reset(void *handle) +static int tonga_ih_check_soft_reset(void *handle) { - u32 srbm_soft_reset = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; u32 tmp = RREG32(mmSRBM_STATUS); if (tmp & SRBM_STATUS__IH_BUSY_MASK) @@ -384,6 +384,48 @@ static int tonga_ih_soft_reset(void *handle) SOFT_RESET_IH, 1); if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true; + adev->irq.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false; 
+ adev->irq.srbm_soft_reset = 0; + } + + return 0; +} + +static int tonga_ih_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_fini(adev); +} + +static int tonga_ih_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + + return tonga_ih_hw_init(adev); +} + +static int tonga_ih_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang) + return 0; + srbm_soft_reset = adev->irq.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + tmp = RREG32(mmSRBM_SOFT_RESET); tmp |= srbm_soft_reset; dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); @@ -427,7 +469,10 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = { .resume = tonga_ih_resume, .is_idle = tonga_ih_is_idle, .wait_for_idle = tonga_ih_wait_for_idle, + .check_soft_reset = tonga_ih_check_soft_reset, + .pre_soft_reset = tonga_ih_pre_soft_reset, .soft_reset = tonga_ih_soft_reset, + .post_soft_reset = tonga_ih_post_soft_reset, .set_clockgating_state = tonga_ih_set_clockgating_state, .set_powergating_state = tonga_ih_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 132e613ed674..10c0407dcb6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -116,7 +116,7 @@ static int uvd_v4_2_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 101de136ba63..8513376062c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -112,7 +112,7 @@ static int uvd_v5_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7f21102bfb99..2abe8a93c99f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -116,7 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf, + r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); return r; @@ -396,21 +396,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); - /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { - uvd_v6_0_set_sw_clock_gating(adev); - } else { - /* disable clock gating */ - uint32_t data = RREG32(mmUVD_CGC_CTRL); - data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - WREG32(mmUVD_CGC_CTRL, data); - } + /* disable clock gating */ + WREG32_FIELD(UVD_CGC_CTRL, DYN_CLOCK_MODE, 0); /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); + WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0); /* stall UMC and register 
bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1); mdelay(1); /* put LMI, VCPU, RBC etc... into reset */ @@ -426,7 +419,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* take UVD block out of reset */ - WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0); mdelay(5); /* initialize UVD memory controller */ @@ -461,7 +454,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -481,11 +474,9 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) break; DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmUVD_SOFT_RESET, 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0); mdelay(10); r = -1; } @@ -502,15 +493,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) /* clear the bit 4 of UVD_STATUS */ WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); - tmp = 0; - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, tmp); /* set the write pointer delay */ @@ -531,7 +521,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); - WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); return 0; } @@ -748,20 +738,82 @@ static int uvd_v6_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) + if (uvd_v6_0_is_idle(handle)) return 0; } return -ETIMEDOUT; } -static int uvd_v6_0_soft_reset(void *handle) +#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd +static int uvd_v6_0_check_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + u32 tmp = RREG32(mmSRBM_STATUS); + + if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || + REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || + (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true; + adev->uvd.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false; + adev->uvd.srbm_soft_reset = 0; + } + return 0; +} +static int uvd_v6_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; uvd_v6_0_stop(adev); + return 0; +} + +static int uvd_v6_0_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; + srbm_soft_reset = adev->uvd.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int uvd_v6_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); return uvd_v6_0_start(adev); @@ -902,21 +954,15 @@ static int uvd_v6_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - static int curstate = -1; if (adev->asic_type == CHIP_FIJI || - adev->asic_type == CHIP_POLARIS10) - uvd_v6_set_bypass_mode(adev, enable); + adev->asic_type == CHIP_POLARIS10) + uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; - if (curstate == state) - return 0; - - curstate = state; - if (enable) { + if (state == AMD_CG_STATE_GATE) { /* disable HW gating and enable Sw gating */ uvd_v6_0_set_sw_clock_gating(adev); } else { @@ -946,6 +992,8 @@ static int uvd_v6_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; + WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; @@ -966,7 +1014,10 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .resume = uvd_v6_0_resume, .is_idle = uvd_v6_0_is_idle, .wait_for_idle = uvd_v6_0_wait_for_idle, + .check_soft_reset = uvd_v6_0_check_soft_reset, + .pre_soft_reset = uvd_v6_0_pre_soft_reset, .soft_reset = uvd_v6_0_soft_reset, + .post_soft_reset = uvd_v6_0_post_soft_reset, .set_clockgating_state = uvd_v6_0_set_clockgating_state, .set_powergating_state = uvd_v6_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 80a37a602181..5fa55b52c00e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -40,6 +40,7 @@ #define VCE_V2_0_FW_SIZE (256 * 1024) #define VCE_V2_0_STACK_SIZE (64 * 1024) #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) +#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); @@ -96,6 +97,49 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) WREG32(mmVCE_RB_WPTR2, ring->wptr); } +static int vce_v2_0_lmi_clean(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_LMI_STATUS); + + if (status & 0x337f) + return 0; + mdelay(10); + } + } + + return 
-ETIMEDOUT; +} + +static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) +{ + int i, j; + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + uint32_t status = RREG32(mmVCE_STATUS); + + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) + return 0; + mdelay(10); + } + + DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(mmVCE_SOFT_RESET, 0, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + mdelay(10); + } + + return -ETIMEDOUT; +} + /** * vce_v2_0_start - start VCE block * @@ -106,7 +150,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring) static int vce_v2_0_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - int i, j, r; + int r; vce_v2_0_mc_resume(adev); @@ -127,36 +171,12 @@ static int vce_v2_0_start(struct amdgpu_device *adev) WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(100); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - for (i = 0; i < 10; ++i) { - uint32_t status; - for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); - if (status & 2) - break; - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } + r = vce_v2_0_firmware_loaded(adev); /* clear BUSY flag */ WREG32_P(mmVCE_STATUS, 0, ~1); @@ -338,47 +358,50 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) { - u32 orig, tmp; + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gateing"); + return; + } - if (gated) { - if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); - return; - } - WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); - WREG32(mmVCE_STATUS, 0); - } else { - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - mdelay(100); + WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("LMI is busy, Can't set clock gateing"); + return; } - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp &= ~0x00060006; + WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_P(mmVCE_SOFT_RESET, + VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, + ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32(mmVCE_STATUS, 0); + + if (gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ if (gated) { - tmp |= 0xe10000; + /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ + 
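Most of the UVD and VCE register writes in the hunks above are converted from open-coded WREG32_P(reg, value, ~MASK) calls to WREG32_FIELD(REG, FIELD, value). As the paired -/+ lines show, the two forms perform the same masked read-modify-write; WREG32_FIELD just spells the operation with the generated <REG>__<FIELD>_MASK and __SHIFT constants. The macro itself lives in the amdgpu headers and is not shown in this diff, so the hand expansion below is a sketch of the intent rather than a quote of the macro.

/*
 * Sketch: what WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1) amounts to,
 * written out by hand (assumes the usual generated _MASK/__SHIFT names).
 */
static void example_field_write(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmUVD_LMI_CTRL2);

	tmp &= ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
	tmp |= 1 << UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT;
	WREG32(mmUVD_LMI_CTRL2, tmp);
}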
WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); } else { - tmp |= 0xe1; - tmp &= ~0xe10000; + /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ + WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); } - WREG32(mmVCE_CLOCK_GATING_B, tmp); - orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp &= ~0x1fe000; - tmp &= ~0xff000000; - if (tmp != orig) - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/; + WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); - orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp &= ~0x3fc; - if (tmp != orig) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); - if (gated) - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); + if(!gated) { + WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); + mdelay(100); + WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + + vce_v2_0_firmware_loaded(adev); + WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + } } static void vce_v2_0_disable_cg(struct amdgpu_device *adev) @@ -458,9 +481,7 @@ static void vce_v2_0_mc_resume(struct amdgpu_device *adev) WREG32(mmVCE_VCPU_CACHE_SIZE2, size); WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); vce_v2_0_init_cg(adev); } @@ -474,11 +495,11 @@ static bool vce_v2_0_is_idle(void *handle) static int vce_v2_0_wait_for_idle(void *handle) { - unsigned i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; for (i = 0; i < adev->usec_timeout; i++) { - if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) + if (vce_v2_0_is_idle(handle)) return 0; } return -ETIMEDOUT; @@ -488,8 +509,7 @@ static int vce_v2_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, - ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); + WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1); mdelay(5); return vce_v2_0_start(adev); @@ -516,10 +536,8 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, DRM_DEBUG("IH: VCE\n"); switch (entry->src_data) { case 0: - amdgpu_fence_process(&adev->vce.ring[0]); - break; case 1: - amdgpu_fence_process(&adev->vce.ring[1]); + amdgpu_fence_process(&adev->vce.ring[entry->src_data]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index c271abffd8dd..615b8b16ad04 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -37,6 +37,9 @@ #include "gca/gfx_8_0_d.h" #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" + #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -107,102 +110,72 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) { - u32 tmp, data; - - tmp = data = RREG32(mmVCE_RB_ARB_CTRL); - if (override) - data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - else - data &= 
~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK; - - if (tmp != data) - WREG32(mmVCE_RB_ARB_CTRL, data); + WREG32_FIELD(VCE_RB_ARB_CTRL, VCE_CGTT_OVERRIDE, override ? 1 : 0); } static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, bool gated) { - u32 tmp, data; + u32 data; + /* Set Override to disable Clock Gating */ vce_v3_0_override_vce_clock_gating(adev, true); - if (!gated) { - /* Force CLOCK ON for VCE_CLOCK_GATING_B, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - * VREG can be FORCE ON or set to Dynamic, but can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + /* This function enables MGCG which is controlled by firmware. + With the clocks in the gated state the core is still + accessible but the firmware will throttle the clocks on the + fly as necessary. + */ + if (gated) { + data = RREG32(mmVCE_CLOCK_GATING_B); data |= 0x1ff; data &= ~0xef0000; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); + WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK ON for VCE_UENC_CLOCK_GATING, - * {*_FORCE_ON, *_FORCE_OFF} = {1, 0} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0x3ff000; data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); + WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x2; - data &= ~0x2; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + data &= ~0x00010000; + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data |= 0x37f; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8; - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8; + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } else { - /* Force CLOCK OFF for VCE_CLOCK_GATING_B, - * {*, *_FORCE_OFF} = {*, 1} - * set VREG to Dynamic, as it can't be OFF - */ - tmp = data = RREG32(mmVCE_CLOCK_GATING_B); + data = RREG32(mmVCE_CLOCK_GATING_B); data &= ~0x80010; data |= 0xe70008; - if (tmp != data) - WREG32(mmVCE_CLOCK_GATING_B, data); - /* Force CLOCK OFF for VCE_UENC_CLOCK_GATING, - * Force ClOCK OFF takes precedent over Force CLOCK ON setting. 
- * {*_FORCE_ON, *_FORCE_OFF} = {*, 1} - */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING); + WREG32(mmVCE_CLOCK_GATING_B, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING); data |= 0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING, data); - /* Set VCE_UENC_CLOCK_GATING_2 */ - tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2); + WREG32(mmVCE_UENC_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_CLOCK_GATING_2); data |= 0x10000; - if (tmp != data) - WREG32(mmVCE_UENC_CLOCK_GATING_2, data); - /* Set VCE_UENC_REG_CLOCK_GATING to dynamic */ - tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + WREG32(mmVCE_UENC_CLOCK_GATING_2, data); + + data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); data &= ~0xffc00000; - if (tmp != data) - WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); - /* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */ - tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); + WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); + + data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | - VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | - 0x8); - if (tmp != data) - WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); + VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK | + VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK | + 0x8); + WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data); } vce_v3_0_override_vce_clock_gating(adev, false); } @@ -221,12 +194,9 @@ static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) } DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); mdelay(10); - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(10); } @@ -264,38 +234,22 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); vce_v3_0_mc_resume(adev, idx); - - WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK, - ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, - ~VCE_VCPU_CNTL__CLK_EN_MASK); - - WREG32_P(mmVCE_SOFT_RESET, 0, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0); mdelay(100); r = vce_v3_0_firmware_loaded(adev); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); - - /* Set Clock-Gating off */ - if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) - vce_v3_0_set_vce_sw_clock_gating(adev, false); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); if (r) { DRM_ERROR("VCE not responding, giving up!!!\n"); @@ -304,7 +258,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -319,33 +273,25 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - if (idx == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - 
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); else - WREG32_P(mmVCE_VCPU_CNTL, 0, - ~VCE_VCPU_CNTL__CLK_EN_MASK); + WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 0); + /* hold on ECPU */ - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); + WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1); /* clear BUSY flag */ - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); + WREG32_FIELD(VCE_STATUS, JOB_BUSY, 0); /* Set Clock-Gating off */ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -534,7 +480,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); - WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + WREG32(mmVCE_CLOCK_GATING_B, 0x1FF); WREG32(mmVCE_LMI_CTRL, 0x00398000); WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); @@ -573,9 +519,7 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) } WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - - WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, - ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); } static bool vce_v3_0_is_idle(void *handle) @@ -601,20 +545,108 @@ static int vce_v3_0_wait_for_idle(void *handle) return -ETIMEDOUT; } +#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK 0x00000008L /* AUTO_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK 0x00000010L /* RB0_BUSY */ +#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK 0x00000020L /* RB1_BUSY */ +#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \ + VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK) + +static int vce_v3_0_check_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 srbm_soft_reset = 0; + + /* According to VCE team , we should use VCE_STATUS instead + * SRBM_STATUS.VCE_BUSY bit for busy status checking. + * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE + * instance's registers are accessed + * (0 for 1st instance, 10 for 2nd instance). 
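As the comment above explains, VCE_STATUS rather than SRBM_STATUS2.VCE_BUSY is the authoritative busy indicator, and GRBM_GFX_INDEX.INSTANCE_INDEX selects which VCE instance's registers are decoded (0 for the first instance, 0x10 for the second). The patch open-codes that access pattern inside vce_v3_0_check_soft_reset(); the helper below is only a sketch of the same pattern, and the helper name and the per-call locking are assumptions.

/* Sketch: read one VCE instance's VCE_STATUS under the GRBM index mutex. */
static u32 example_vce_instance_status(struct amdgpu_device *adev, int idx)
{
	u32 status;

	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, idx ? 0x10 : 0);
	status = RREG32(mmVCE_STATUS);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);	/* restore default */
	mutex_unlock(&adev->grbm_idx_mutex);

	return status;
}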
+ * + *VCE_STATUS + *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 | |FW_LOADED|JOB | + *|----+----+-----------+----+----+----+----------+---------+----| + *|bit8|bit7| bit6 |bit5|bit4|bit3| bit2 | bit1 |bit0| + * + * VCE team suggest use bit 3--bit 6 for busy status check + */ + mutex_lock(&adev->grbm_idx_mutex); + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); + srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); + } + WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + + if (srbm_soft_reset) { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true; + adev->vce.srbm_soft_reset = srbm_soft_reset; + } else { + adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false; + adev->vce.srbm_soft_reset = 0; + } + mutex_unlock(&adev->grbm_idx_mutex); + return 0; +} + static int vce_v3_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 mask = 0; + u32 srbm_soft_reset; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + srbm_soft_reset = adev->vce.srbm_soft_reset; + + if (srbm_soft_reset) { + u32 tmp; + + tmp = RREG32(mmSRBM_SOFT_RESET); + tmp |= srbm_soft_reset; + dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + udelay(50); + + tmp &= ~srbm_soft_reset; + WREG32(mmSRBM_SOFT_RESET, tmp); + tmp = RREG32(mmSRBM_SOFT_RESET); + + /* Wait a little for things to settle down */ + udelay(50); + } + + return 0; +} + +static int vce_v3_0_pre_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; + + mdelay(5); - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK; - mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 
0 : SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK; + return vce_v3_0_suspend(adev); +} + + +static int vce_v3_0_post_soft_reset(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang) + return 0; - WREG32_P(mmSRBM_SOFT_RESET, mask, - ~(SRBM_SOFT_RESET__SOFT_RESET_VCE0_MASK | - SRBM_SOFT_RESET__SOFT_RESET_VCE1_MASK)); mdelay(5); - return vce_v3_0_start(adev); + return vce_v3_0_resume(adev); } static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, @@ -637,9 +669,7 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, { DRM_DEBUG("IH: VCE\n"); - WREG32_P(mmVCE_SYS_INT_STATUS, - VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, - ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); + WREG32_FIELD(VCE_SYS_INT_STATUS, VCE_SYS_INT_TRAP_INTERRUPT_INT, 1); switch (entry->src_data) { case 0: @@ -686,13 +716,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - if (i == 0) - WREG32_P(mmGRBM_GFX_INDEX, 0, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); - else - WREG32_P(mmGRBM_GFX_INDEX, - GRBM_GFX_INDEX__VCE_INSTANCE_MASK, - ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -711,7 +735,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); + WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -751,7 +775,10 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { .resume = vce_v3_0_resume, .is_idle = vce_v3_0_is_idle, .wait_for_idle = vce_v3_0_wait_for_idle, + .check_soft_reset = vce_v3_0_check_soft_reset, + .pre_soft_reset = vce_v3_0_pre_soft_reset, .soft_reset = vce_v3_0_soft_reset, + .post_soft_reset = vce_v3_0_post_soft_reset, .set_clockgating_state = vce_v3_0_set_clockgating_state, .set_powergating_state = vce_v3_0_set_powergating_state, }; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 03a31c53aec3..f2e8aa1a0dbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -77,6 +77,7 @@ #if defined(CONFIG_DRM_AMD_ACP) #include "amdgpu_acp.h" #endif +#include "dce_virtual.h" MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); @@ -822,6 +823,60 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! 
*/ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 7, + .minor = 4, + .rev = 0, + .funcs = &gmc_v7_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &iceland_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 2, + .minor = 4, + .rev = 0, + .funcs = &sdma_v2_4_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version tonga_ip_blocks[] = { /* ORDER MATTERS! */ @@ -890,6 +945,74 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 5, + .minor = 0, + .rev = 0, + .funcs = &uvd_v5_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version fiji_ip_blocks[] = { /* ORDER MATTERS! */ @@ -958,6 +1081,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 5, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 1, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 10, + .minor = 1, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = { /* ORDER MATTERS! 
*/ @@ -1026,6 +1217,74 @@ static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = }, }; +static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 1, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &tonga_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 7, + .minor = 2, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 2, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 1, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 3, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 4, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +}; + static const struct amdgpu_ip_block_version cz_ip_blocks[] = { /* ORDER MATTERS! */ @@ -1103,34 +1362,142 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] = #endif }; +static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = +{ + /* ORDER MATTERS! */ + { + .type = AMD_IP_BLOCK_TYPE_COMMON, + .major = 2, + .minor = 0, + .rev = 0, + .funcs = &vi_common_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gmc_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_IH, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &cz_ih_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &amdgpu_pp_ip_funcs + }, + { + .type = AMD_IP_BLOCK_TYPE_DCE, + .major = 11, + .minor = 0, + .rev = 0, + .funcs = &dce_virtual_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_GFX, + .major = 8, + .minor = 0, + .rev = 0, + .funcs = &gfx_v8_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_SDMA, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &sdma_v3_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_UVD, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &uvd_v6_0_ip_funcs, + }, + { + .type = AMD_IP_BLOCK_TYPE_VCE, + .major = 3, + .minor = 0, + .rev = 0, + .funcs = &vce_v3_0_ip_funcs, + }, +#if defined(CONFIG_DRM_AMD_ACP) + { + .type = AMD_IP_BLOCK_TYPE_ACP, + .major = 2, + .minor = 2, + .rev = 0, + .funcs = &acp_ip_funcs, + }, +#endif +}; + int vi_set_ip_blocks(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_TOPAZ: - adev->ip_blocks = topaz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); - break; - case CHIP_FIJI: - adev->ip_blocks = fiji_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); - break; - case CHIP_TONGA: - adev->ip_blocks = tonga_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); - break; - case CHIP_POLARIS11: - case CHIP_POLARIS10: - adev->ip_blocks = polaris11_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); - break; - case CHIP_CARRIZO: - case CHIP_STONEY: - adev->ip_blocks = cz_ip_blocks; - adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); - break; - default: - /* FIXME: not supported yet */ - return -EINVAL; + if (adev->enable_virtual_display) { + switch (adev->asic_type) { + case CHIP_TOPAZ: + 
adev->ip_blocks = topaz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); + break; + + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks_vd; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } + } else { + switch (adev->asic_type) { + case CHIP_TOPAZ: + adev->ip_blocks = topaz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); + break; + case CHIP_FIJI: + adev->ip_blocks = fiji_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); + break; + case CHIP_TONGA: + adev->ip_blocks = tonga_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + adev->ip_blocks = polaris11_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); + break; + case CHIP_CARRIZO: + case CHIP_STONEY: + adev->ip_blocks = cz_ip_blocks; + adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); + break; + default: + /* FIXME: not supported yet */ + return -EINVAL; + } } return 0; @@ -1248,8 +1615,17 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + /* rev0 hardware requires workarounds to support PG */ adev->pg_flags = 0; + if (adev->rev_id != 0x00) { + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; + } adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: @@ -1267,7 +1643,13 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCE_MGCG; + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_UVD | + AMD_PG_SUPPORT_VCE; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index a74a0d2ff1ca..db710418f35f 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -159,8 +159,14 @@ struct amd_ip_funcs { bool (*is_idle)(void *handle); /* poll for idle */ int (*wait_for_idle)(void *handle); + /* check soft reset the IP block */ + int (*check_soft_reset)(void *handle); + /* pre soft reset the IP block */ + int (*pre_soft_reset)(void *handle); /* soft reset the IP block */ int (*soft_reset)(void *handle); + /* post soft reset the IP block */ + int (*post_soft_reset)(void *handle); /* enable/disable cg for the IP block */ int (*set_clockgating_state)(void *handle, enum amd_clockgating_state state); diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h index f3e53b118361..19802e96417e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_2_d.h @@ -34,6 +34,7 @@ #define 
mmUVD_UDEC_ADDR_CONFIG 0x3bd3 #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_SEMA_CNTL 0x3d00 #define mmUVD_LMI_EXT40_ADDR 0x3d26 #define mmUVD_CTX_INDEX 0x3d28 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h index eb4cf53427da..cc972d237a7e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_5_0_d.h @@ -34,6 +34,7 @@ #define mmUVD_UDEC_ADDR_CONFIG 0x3bd3 #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 +#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index ec69869c55ff..378f4b6b43da 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -35,6 +35,7 @@ #define mmUVD_UDEC_DB_ADDR_CONFIG 0x3bd4 #define mmUVD_UDEC_DBW_ADDR_CONFIG 0x3bd5 #define mmUVD_POWER_STATUS_U 0x3bfd +#define mmUVD_NO_OP 0x3bff #define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x3c69 #define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x3c68 #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x3c67 diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index abbb658bdc1e..2de34a5a85c2 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,6 +31,7 @@ #include "eventmanager.h" #include "pp_debug.h" + #define PP_CHECK(handle) \ do { \ if ((handle) == NULL || (handle)->pp_valid != PP_VALID) \ @@ -162,12 +163,12 @@ static int pp_hw_fini(void *handle) pp_handle = (struct pp_instance *)handle; eventmgr = pp_handle->eventmgr; - if (eventmgr != NULL || eventmgr->pp_eventmgr_fini != NULL) + if (eventmgr != NULL && eventmgr->pp_eventmgr_fini != NULL) eventmgr->pp_eventmgr_fini(eventmgr); smumgr = pp_handle->smu_mgr; - if (smumgr != NULL || smumgr->smumgr_funcs != NULL || + if (smumgr != NULL && smumgr->smumgr_funcs != NULL && smumgr->smumgr_funcs->smu_fini != NULL) smumgr->smumgr_funcs->smu_fini(smumgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index f7ce4cb71346..abbcbc9f6eca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -4,13 +4,15 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ hardwaremanager.o pp_acpi.o cz_hwmgr.o \ - cz_clockpowergating.o \ + cz_clockpowergating.o tonga_powertune.o\ tonga_processpptables.o ppatomctrl.o \ tonga_hwmgr.o pppcielanes.o tonga_thermal.o\ fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \ fiji_clockpowergating.o fiji_thermal.o \ polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \ - polaris10_clockpowergating.o + polaris10_clockpowergating.o iceland_hwmgr.o \ + iceland_clockpowergating.o iceland_thermal.o \ + iceland_powertune.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 8cc0df9b534a..5ecef1732e20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -178,7 +178,6 @@ static int cz_initialize_dpm_defaults(struct 
pp_hwmgr *hwmgr) int result; cz_hwmgr->gfx_ramp_step = 256*25/100; - cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */ for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) @@ -186,33 +185,19 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) cz_hwmgr->mgcg_cgtt_local0 = 0x00000000; cz_hwmgr->mgcg_cgtt_local1 = 0x00000000; - cz_hwmgr->clock_slow_down_freq = 25000; - cz_hwmgr->skip_clock_slow_down = 1; - cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */ - cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */ - cz_hwmgr->voting_rights_clients = 0x00C00033; - cz_hwmgr->static_screen_threshold = 8; - cz_hwmgr->ddi_power_gating_disabled = 0; - cz_hwmgr->bapm_enabled = 1; - cz_hwmgr->voltage_drop_threshold = 0; - cz_hwmgr->gfx_power_gating_threshold = 500; - cz_hwmgr->vce_slow_sclk_threshold = 20000; - cz_hwmgr->dce_slow_sclk_threshold = 30000; - cz_hwmgr->disable_driver_thermal_policy = 1; - cz_hwmgr->disable_nb_ps3_in_battery = 0; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, @@ -221,9 +206,6 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NonABMSupportInPPLib); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicM3Arbiter); @@ -233,9 +215,7 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicPatchPowerState); cz_hwmgr->thermal_auto_throttling_treshold = 0; - cz_hwmgr->tdr_clock = 0; - cz_hwmgr->disable_gfx_power_gating_in_uvd = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, @@ -450,19 +430,12 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr) (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index; cz_hwmgr->boot_power_level.dsDividerIndex = 0; - cz_hwmgr->boot_power_level.ssDividerIndex = 0; - cz_hwmgr->boot_power_level.allowGnbSlow = 1; - cz_hwmgr->boot_power_level.forceNBPstate = 0; - cz_hwmgr->boot_power_level.hysteresis_up = 0; - cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0; - cz_hwmgr->boot_power_level.display_wm = 0; - cz_hwmgr->boot_power_level.vce_wm = 0; return 0; @@ -749,7 +722,6 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr, cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; clock = hwmgr->display_config.min_core_set_clock; -; if (clock == 0) printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n"); @@ -832,7 +804,7 @@ static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetWatermarkFrequency, - cz_hwmgr->sclk_dpm.soft_max_clk); + cz_hwmgr->sclk_dpm.soft_max_clk); return 0; } @@ -858,9 +830,9 @@ static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr, PP_DBG_LOG("enabling ALL SMU features.\n"); dpm_features |= NB_DPM_MASK; ret = smum_send_msg_to_smc_with_parameter( - hwmgr->smumgr, - PPSMC_MSG_EnableAllSmuFeatures, - dpm_features); + hwmgr->smumgr, + PPSMC_MSG_EnableAllSmuFeatures, + dpm_features); if (ret == 0) cz_hwmgr->is_nb_dpm_enabled = true; } @@ -1246,7 +1218,7 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr != NULL || hwmgr->backend != NULL) { + if (hwmgr != NULL && hwmgr->backend != NULL) { kfree(hwmgr->backend); kfree(hwmgr); } @@ -1402,10 +1374,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) 
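Two powerplay teardown paths above, pp_hw_fini() and cz_hwmgr_backend_fini(), are corrected from || to &&. With ||, either the NULL pointer itself gets dereferenced by the second test, or a non-NULL first operand short-circuits past the check of the pointer that is about to be used. The fragment below is a stand-alone illustration of that failure mode, not code from the patch.

/* Illustration of the short-circuit bug fixed by the || -> && changes. */
struct mgr {
	void (*fini)(struct mgr *m);
};

static void broken_fini(struct mgr *m)
{
	if (m != NULL || m->fini != NULL)	/* m == NULL: right side dereferences NULL */
		m->fini(m);			/* m != NULL: fini may still be NULL here */
}

static void fixed_fini(struct mgr *m)
{
	if (m != NULL && m->fini != NULL)	/* both conditions must hold before the call */
		m->fini(m);
}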
PPSMC_MSG_SetUvdHardMin)); cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } else { cz_enable_disable_uvd_dpm(hwmgr, true); - } else + } + } else { cz_enable_disable_uvd_dpm(hwmgr, false); + } return 0; } @@ -1690,13 +1664,10 @@ static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); if (separation_time != - hw_data->cc6_settings.cpu_pstate_separation_time - || cc6_disable != - hw_data->cc6_settings.cpu_cc6_disable - || pstate_disable != - hw_data->cc6_settings.cpu_pstate_disable - || pstate_switch_disable != - hw_data->cc6_settings.nb_pstate_switch_disable) { + hw_data->cc6_settings.cpu_pstate_separation_time || + cc6_disable != hw_data->cc6_settings.cpu_cc6_disable || + pstate_disable != hw_data->cc6_settings.cpu_pstate_disable || + pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) { hw_data->cc6_settings.cc6_setting_changed = true; @@ -1799,8 +1770,7 @@ static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p ps = cast_const_PhwCzPowerState(state); level_index = index > ps->level - 1 ? ps->level - 1 : index; - - level->coreClock = ps->levels[level_index].engineClock; + level->coreClock = ps->levels[level_index].engineClock; if (designation == PHM_PerformanceLevelDesignation_PowerContainment) { for (i = 1; i < ps->level; i++) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 120a9e2c3152..9368e21f5695 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include "linux/delay.h" #include "hwmgr.h" @@ -619,9 +618,6 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - data->gpio_debug = 0; phm_cap_set(hwmgr->platform_descriptor.platformCaps, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index 44658451a8d2..f5992ea0c56f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -57,8 +57,6 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) /* Assume disabled */ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); @@ -77,9 +75,8 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) fiji_hwmgr->fast_watermark_threshold = 100; - if (hwmgr->powercontainment_enabled) { - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { tmp = 1; fiji_hwmgr->enable_dte_feature = tmp ? false : true; fiji_hwmgr->enable_tdc_limit_feature = tmp ? 
true : false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 27e07624ac28..d829076ed9ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -39,6 +39,26 @@ extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); +extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); + +static int hwmgr_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + if (amdgpu_sclk_deep_sleep_en) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + if (amdgpu_powercontainment) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + + return 0; +} int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) { @@ -57,9 +77,12 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->chip_family = pp_init->chip_family; hwmgr->chip_id = pp_init->chip_id; hwmgr->hw_revision = pp_init->rev_id; + hwmgr->sub_sys_id = pp_init->sub_sys_id; + hwmgr->sub_vendor_id = pp_init->sub_vendor_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; - hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; + + hwmgr_set_features_platform_caps(hwmgr); switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CZ: @@ -67,6 +90,9 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { + case CHIP_TOPAZ: + iceland_hwmgr_init(hwmgr); + break; case CHIP_TONGA: tonga_hwmgr_init(hwmgr); break; @@ -182,29 +208,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, return 0; } -int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask) -{ - uint32_t i; - uint32_t cur_value; - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return -EINVAL; - } - - for (i = 0; i < hwmgr->usec_timeout; i++) { - cur_value = cgs_read_register(hwmgr->device, index); - if ((cur_value & mask) != (value & mask)) - break; - udelay(1); - } - - /* timeout means wrong logic*/ - if (i == hwmgr->usec_timeout) - return -1; - return 0; -} /** @@ -227,21 +231,7 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); } -void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask) -{ - if (hwmgr == NULL || hwmgr->device == NULL) { - printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); - return; - } - cgs_write_register(hwmgr->device, indirect_port, index); - phm_wait_for_register_unequal(hwmgr, indirect_port + 1, - value, mask); -} bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c new file mode 100644 index 000000000000..47949f5cd073 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c @@ -0,0 +1,119 @@ +/* + * Copyright 2016 Advanced 
Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#include "hwmgr.h" +#include "iceland_clockpowergating.h" +#include "ppsmc.h" +#include "iceland_hwmgr.h" + +int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr) +{ + /* iceland does not have MM hardware block */ + return 0; +} + +int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum + PHM_AsicBlock block, enum PHM_ClockGateSetting gating) +{ + int ret = 0; + + switch (block) { + case PHM_AsicBlock_UVD_MVC: + case PHM_AsicBlock_UVD: + case PHM_AsicBlock_UVD_HD: + case PHM_AsicBlock_UVD_SD: + if (gating == PHM_ClockGateSetting_StaticOff) + ret = iceland_phm_powerdown_uvd(hwmgr); + else + ret = iceland_phm_powerup_uvd(hwmgr); + break; + case PHM_AsicBlock_GFX: + default: + break; + } + + return ret; +} + +int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = false; + data->vce_power_gated = false; + + iceland_phm_powerup_uvd(hwmgr); + iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) { + iceland_update_uvd_dpm(hwmgr, true); + iceland_phm_powerdown_uvd(hwmgr); + } else { + iceland_phm_powerup_uvd(hwmgr); + iceland_update_uvd_dpm(hwmgr, false); + } + + return 0; +} + +int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) + return iceland_phm_powerdown_vce(hwmgr); + else + return iceland_phm_powerup_vce(hwmgr); + + return 0; +} + +int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id) +{ + /* iceland does not have MM hardware block */ + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h new file mode 100644 index 000000000000..ff5ef00c7c68 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef _ICELAND_CLOCK_POWER_GATING_H_ +#define _ICELAND_CLOCK_POWER_GATING_H_ + +#include "iceland_hwmgr.h" +#include "pp_asicblocks.h" + +extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating); +extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate); +extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr); +extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id); +#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h new file mode 100644 index 000000000000..a7b4bc6caea2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h @@ -0,0 +1,41 @@ +#ifndef ICELAND_DYN_DEFAULTS_H +#define ICELAND_DYN_DEFAULTS_H + +enum ICELANDdpm_TrendDetection +{ + ICELANDdpm_TrendDetection_AUTO, + ICELANDdpm_TrendDetection_UP, + ICELANDdpm_TrendDetection_DOWN +}; +typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection; + + +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 +#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 + + +#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200 + +#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0 + +#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8 + +#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 + +#define PPICELAND_REFERENCEDIVIDER_DFLT 4 + +#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687 + +#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035 +#define PPICELAND_CGULVCONTROL_DFLT 0x00007450 +#define PPICELAND_TARGETACTIVITY_DFLT 30 +#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10 + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c new file mode 100644 index 000000000000..8a7ada50551c --- 
/dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c @@ -0,0 +1,5692 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" +#include "pp_acpi.h" +#include "hwmgr.h" +#include <atombios.h> +#include "iceland_hwmgr.h" +#include "pptable.h" +#include "processpptables.h" +#include "pp_debug.h" +#include "ppsmc.h" +#include "cgs_common.h" +#include "pppcielanes.h" +#include "iceland_dyn_defaults.h" +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "iceland_clockpowergating.h" +#include "iceland_thermal.h" +#include "iceland_powertune.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + +#include "cgs_linux.h" +#include "eventmgr.h" +#include "amd_pcie_helpers.h" + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define MC_CG_SEQ_DRAMCONF_S0 0x05 +#define MC_CG_SEQ_DRAMCONF_S1 0x06 +#define MC_CG_SEQ_YCLK_SUSPEND 0x04 +#define MC_CG_SEQ_YCLK_RESUME 0x0a + +#define PCIE_BUS_CLK 10000 +#define TCLK (PCIE_BUS_CLK / 10) + +#define SMC_RAM_END 0x40000 +#define SMC_CG_IND_START 0xc0030000 +#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/ + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic); + +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 + +/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ +enum DPM_EVENT_SRC { + DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */ + DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */ + DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */ + DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */ + DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */ +}; + +static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->clock_registers.vCG_SPLL_FUNC_CNTL = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); + data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); + data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); + data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); + data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = + cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); + data->clock_registers.vDLL_CNTL = + cgs_read_register(hwmgr->device, mmDLL_CNTL); + data->clock_registers.vMCLK_PWRMGT_CNTL = + cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); + data->clock_registers.vMPLL_AD_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); + data->clock_registers.vMPLL_DQ_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); + data->clock_registers.vMPLL_FUNC_CNTL_1 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); + data->clock_registers.vMPLL_FUNC_CNTL_2 = + cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); + data->clock_registers.vMPLL_SS1 = + cgs_read_register(hwmgr->device, mmMPLL_SS1); + data->clock_registers.vMPLL_SS2 = + cgs_read_register(hwmgr->device, mmMPLL_SS2); + + return 0; +} + +/** + * Find out if memory is GDDR5. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_get_memory_type(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t temp; + + temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + + data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE == + ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> + MC_SEQ_MISC0_GDDR5_SHIFT)); + + return 0; +} + +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) +{ + /* iceland does not have MM hardware blocks */ + return 0; +} + +/** + * Enables Dynamic Power Management by SMC + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1); + + return 0; +} + +/** + * Find the MC microcode version and store it in the HwMgr struct + * + * @param hwmgr the address of the powerplay hardware manager. 
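Editor's note: iceland_get_memory_type() above reduces to a mask-and-shift test on MC_SEQ_MISC0. The following is a hedged, standalone sketch of that test; only the mask/shift/value constants are taken from the patch, and the register read is replaced by a plain parameter so the snippet compiles and runs outside the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

/* Decide whether the board uses GDDR5 from an MC_SEQ_MISC0 value. */
static bool is_gddr5(uint32_t mc_seq_misc0)
{
        return ((mc_seq_misc0 & MC_SEQ_MISC0_GDDR5_MASK) >>
                MC_SEQ_MISC0_GDDR5_SHIFT) == MC_SEQ_MISC0_GDDR5_VALUE;
}

int main(void)
{
        printf("%d\n", is_gddr5(0x50000000));   /* 1: bits [31:28] == 5 */
        printf("%d\n", is_gddr5(0x30000000));   /* 0: some other memory type */
        return 0;
}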
+ * @return always 0 + */ +int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr) +{ + cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); + + hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); + + return 0; +} + +static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + + +static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = iceland_read_clock_registers(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to read clock registers!", result = tmp_result); + + tmp_result = iceland_get_memory_type(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get memory type!", result = tmp_result); + + tmp_result = iceland_enable_acpi_power_management(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable ACPI power management!", result = tmp_result); + + tmp_result = iceland_get_mc_microcode_version(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to get MC microcode version!", result = tmp_result); + + tmp_result = iceland_init_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to init sclk threshold!", result = tmp_result); + + return result; +} + +static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control; +} + +/* + * -------------- Voltage Tables ---------------------- + * If the voltage table would be bigger than what will fit into the + * state table on the SMC keep only the higher entries. + */ + +static void iceland_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_voltage_steps, + pp_atomctrl_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) { + return; + } + + diff = voltage_table->count - max_voltage_steps; + + for (i = 0; i < max_voltage_steps; i++) { + voltage_table->entries[i] = voltage_table->entries[i + diff]; + } + + voltage_table->count = max_voltage_steps; + + return; +} + +/** + * Enable voltage control + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr) +{ + /* enable voltage control */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1); + + return 0; +} + +static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *voltage_dependency_table, + pp_atomctrl_voltage_table *voltage_table) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Dependency Table empty.", return -EINVAL;); + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + voltage_table->count = voltage_dependency_table->count; + + for (i = 0; i < voltage_dependency_table->count; i++) { + voltage_table->entries[i].value = + voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +/** + * Create Voltage Tables. + * + * @param hwmgr the address of the powerplay hardware manager. 
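Editor's note: the trimming helper above keeps only the highest voltage levels when the table exceeds what the SMC state table can hold. A minimal sketch of that policy follows, on a simplified table type; the struct and sizes here are illustrative, not the driver's pp_atomctrl_voltage_table.

#include <stdint.h>
#include <stdio.h>

/* Simplified voltage table: values only, no SMIO masks. */
struct volt_table {
        uint32_t count;
        uint16_t entries[32];
};

/*
 * Keep only the highest 'max_steps' entries, mirroring the driver's
 * "drop the lowest levels" policy when the table exceeds SMC capacity.
 */
static void trim_to_fit(struct volt_table *t, uint32_t max_steps)
{
        uint32_t i, diff;

        if (t->count <= max_steps)
                return;

        diff = t->count - max_steps;
        for (i = 0; i < max_steps; i++)
                t->entries[i] = t->entries[i + diff];
        t->count = max_steps;
}

int main(void)
{
        struct volt_table t = { .count = 6,
                .entries = { 800, 850, 900, 950, 1000, 1050 } };

        trim_to_fit(&t, 4);     /* keeps 900, 950, 1000, 1050 */
        for (uint32_t i = 0; i < t.count; i++)
                printf("%d\n", t.entries[i]);
        return 0;
}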
+ * @return always 0 + */ +int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result; + + /* GPIO voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDC table.", return result;); + } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + /* SVI2 VDDC voltage */ + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, + &data->vddc_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDC table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)), + "Too many voltage values for VDDC. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)); + ); + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve VDDCI table.", return result;); + } + + /* SVI2 VDDCI voltage */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + &data->vddci_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 VDDCI table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)), + "Too many voltage values for VDDCI. Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)); + ); + + + /* GPIO */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + result = atomctrl_get_voltage_table_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve MVDD table.", return result;); + } + + /* SVI2 voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + result = iceland_get_svi2_voltage_table(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, + &data->mvdd_voltage_table); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to retrieve SVI2 MVDD table from dependency table.", return result;); + } + + PP_ASSERT_WITH_CODE( + (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)), + "Too many voltage values for MVDD.
Trimming to fit state table.", + iceland_trim_voltage_table_to_fit_state_table(hwmgr, + SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)); + ); + + return 0; +} + +/*---------------------------MC----------------------------*/ + +uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr) +{ + return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16)); +} + +bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) +{ + bool result = true; + + switch (inReg) { + case mmMC_SEQ_RAS_TIMING: + *outReg = mmMC_SEQ_RAS_TIMING_LP; + break; + + case mmMC_SEQ_DLL_STBY: + *outReg = mmMC_SEQ_DLL_STBY_LP; + break; + + case mmMC_SEQ_G5PDX_CMD0: + *outReg = mmMC_SEQ_G5PDX_CMD0_LP; + break; + + case mmMC_SEQ_G5PDX_CMD1: + *outReg = mmMC_SEQ_G5PDX_CMD1_LP; + break; + + case mmMC_SEQ_G5PDX_CTRL: + *outReg = mmMC_SEQ_G5PDX_CTRL_LP; + break; + + case mmMC_SEQ_CAS_TIMING: + *outReg = mmMC_SEQ_CAS_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING: + *outReg = mmMC_SEQ_MISC_TIMING_LP; + break; + + case mmMC_SEQ_MISC_TIMING2: + *outReg = mmMC_SEQ_MISC_TIMING2_LP; + break; + + case mmMC_SEQ_PMG_DVS_CMD: + *outReg = mmMC_SEQ_PMG_DVS_CMD_LP; + break; + + case mmMC_SEQ_PMG_DVS_CTL: + *outReg = mmMC_SEQ_PMG_DVS_CTL_LP; + break; + + case mmMC_SEQ_RD_CTL_D0: + *outReg = mmMC_SEQ_RD_CTL_D0_LP; + break; + + case mmMC_SEQ_RD_CTL_D1: + *outReg = mmMC_SEQ_RD_CTL_D1_LP; + break; + + case mmMC_SEQ_WR_CTL_D0: + *outReg = mmMC_SEQ_WR_CTL_D0_LP; + break; + + case mmMC_SEQ_WR_CTL_D1: + *outReg = mmMC_SEQ_WR_CTL_D1_LP; + break; + + case mmMC_PMG_CMD_EMRS: + *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP; + break; + + case mmMC_PMG_CMD_MRS: + *outReg = mmMC_SEQ_PMG_CMD_MRS_LP; + break; + + case mmMC_PMG_CMD_MRS1: + *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP; + break; + + case mmMC_SEQ_PMG_TIMING: + *outReg = mmMC_SEQ_PMG_TIMING_LP; + break; + + case mmMC_PMG_CMD_MRS2: + *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP; + break; + + case mmMC_SEQ_WR_CTL_2: + *outReg = mmMC_SEQ_WR_CTL_2_LP; + break; + + default: + result = false; + break; + } + + return result; +} + +int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table) +{ + uint32_t i; + uint16_t address; + + for (i = 0; i < table->last; i++) { + table->mc_reg_address[i].s0 = + iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) + ? address : table->mc_reg_address[i].s1; + } + return 0; +} + +int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table) +{ + uint8_t i, j; + + PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES), + "Invalid VramInfo table.", return -1); + + for (i = 0; i < table->last; i++) { + ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + } + ni_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + ni_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + ni_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + + ni_table->num_entries = table->num_entries; + + return 0; +} + +/** + * VBIOS omits some information to reduce size, we need to recover them here. + * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write to mmMC_PMG_CMD_EMRS /_LP[15:0]. + * Bit[15:0] MRS, need to be update mmMC_PMG_CMD_MRS/_LP[15:0] + * 2. 
when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2, need to be write to mmMC_PMG_CMD_MRS1/_LP[15:0]. + * 3. need to set these data for each clock range + * + * @param hwmgr the address of the powerplay hardware manager. + * @param table the address of MCRegTable + * @return always 0 + */ +static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table) +{ + uint8_t i, j, k; + uint32_t temp_reg; + const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + for (i = 0, j = table->last; i < table->last; i++) { + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + switch (table->mc_reg_address[i].s1) { + /* + * mmMC_SEQ_MISC1, bit[31:16] EMRS1, need to be write + * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS, need + * to be update mmMC_PMG_CMD_MRS/_LP[15:0] + */ + case mmMC_SEQ_MISC1: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + } + j++; + PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + + if (!data->is_memory_GDDR5) { + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + + if (!data->is_memory_GDDR5) { + table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + } + + break; + + case mmMC_SEQ_RESERVE_M: + temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + } + j++; + PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE), + "Invalid VramInfo table.", return -1); + break; + + default: + break; + } + + } + + table->last = j; + + return 0; +} + + +static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table) +{ + uint8_t i, j; + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != + table->mc_reg_table_entry[j].mc_data[i]) { + table->validflag |= (1<<i); + break; + } + } + } + + return 0; +} + +static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_mc_reg_table *table; + phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table; + uint8_t module_index = 
iceland_get_memory_module_index(hwmgr); + + table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + /* Program additional LP registers that are no longer programmed by VBIOS */ + cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0)); + cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING)); + cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2)); + cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2)); + + memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table)); + + result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table); + + if (0 == result) + result = iceland_copy_vbios_smc_reg_table(table, ni_table); + + if (0 == result) { + iceland_set_s0_mc_reg_index(ni_table); + result = iceland_set_mc_special_registers(hwmgr, ni_table); + } + + if (0 == result) + iceland_set_valid_flag(ni_table); + + kfree(table); + return result; +} + +/** + * Programs static screed detection parameters + * + * @param hwmgr the address of the powerplay hardware manager. 
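Editor's note: iceland_set_valid_flag(), called just above, marks which MC register columns actually change across MCLK entries so that unchanging registers can be skipped later. A compact standalone model of that bookkeeping follows; the array dimensions and register values are made up for the demonstration.

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS    4
#define NUM_ENTRIES 3

/*
 * Set bit i when register column i differs between any two adjacent
 * MCLK entries; columns that never change need no reprogramming.
 */
static uint32_t compute_validflag(const uint32_t data[NUM_ENTRIES][NUM_REGS])
{
        uint32_t flag = 0;

        for (int i = 0; i < NUM_REGS; i++) {
                for (int j = 1; j < NUM_ENTRIES; j++) {
                        if (data[j - 1][i] != data[j][i]) {
                                flag |= 1u << i;
                                break;
                        }
                }
        }
        return flag;
}

int main(void)
{
        const uint32_t mc_data[NUM_ENTRIES][NUM_REGS] = {
                { 0x10, 0x20, 0x30, 0x40 },
                { 0x10, 0x21, 0x30, 0x40 },
                { 0x10, 0x22, 0x30, 0x41 },
        };

        /* Prints 0xa: only columns 1 and 3 vary across entries. */
        printf("0x%x\n", (unsigned)compute_validflag(mc_data));
        return 0;
}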
+ * @return always 0 + */ +int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Set static screen threshold unit*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT, + data->static_screen_threshold_unit); + /* Set static screen threshold*/ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD, + data->static_screen_threshold); + + return 0; +} + +/** + * Setup display gap for glitch free memory clock switching. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_enable_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE); + + display_gap = PHM_SET_FIELD(display_gap, + CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_DISPLAY_GAP_CNTL, display_gap); + + return 0; +} + +/** + * Programs activity state transition voting clients + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + */ +int iceland_program_voting_clients(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Clear reset for voting clients before enabling DPM */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7); + + return 0; +} + +static int iceland_upload_firmware(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_upload_firmware_image(hwmgr->smumgr); + + return ret; +} + +/** + * Get the location of various tables inside the FW image. + * + * @param hwmgr the address of the powerplay hardware manager. 
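Editor's note: the display-gap and static-screen programming above is read-modify-write on individual register fields. The snippet below is only a rough guess at the general shape of such a field update (mask out, shift in); the mask and shift values are invented, and this is not the actual PHM_SET_FIELD macro.

#include <stdint.h>
#include <stdio.h>

/* Replace one bitfield inside a register value. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
                          uint32_t field)
{
        return (reg & ~mask) | ((field << shift) & mask);
}

int main(void)
{
        uint32_t display_gap = 0x12345678;

        /* Pretend DISP_GAP lives in bits [1:0] and DISP_GAP_MCHG in [5:4]. */
        display_gap = set_field(display_gap, 0x00000003, 0, 3); /* "ignore" */
        display_gap = set_field(display_gap, 0x00000030, 4, 1); /* "vblank" */

        printf("0x%08x\n", (unsigned)display_gap);
        return 0;
}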
+ * @return always 0 + */ +int iceland_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + uint32_t tmp; + int result; + bool error = 0; + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, DpmTable), + &tmp, data->sram_end); + + if (0 == result) { + data->dpm_table_start = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, SoftRegisters), + &tmp, data->sram_end); + + if (0 == result) { + data->soft_regs_start = tmp; + } + + error |= (0 != result); + + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcRegisterTable), + &tmp, data->sram_end); + + if (0 == result) { + data->mc_reg_table_start = tmp; + } + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, FanTable), + &tmp, data->sram_end); + + if (0 == result) { + data->fan_table_start = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, mcArbDramTimingTable), + &tmp, data->sram_end); + + if (0 == result) { + data->arb_table_start = tmp; + } + + error |= (0 != result); + + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, Version), + &tmp, data->sram_end); + + if (0 == result) { + hwmgr->microcode_version_info.SMC = tmp; + } + + error |= (0 != result); + + result = iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, UlvSettings), + &tmp, data->sram_end); + + if (0 == result) { + data->ulv_settings_start = tmp; + } + + error |= (0 != result); + + return error ? 1 : 0; +} + +/* +* Copy one arb setting to another and then switch the active set. +* arbFreqSrc and arbFreqDest is one of the MC_CG_ARB_FREQ_Fx constants. 
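Editor's note: iceland_process_firmware_header() locates each SMC table by reading a dword at offsetof(SMU71_Firmware_Header, field) from the firmware header held in SMC SRAM. Below is a runnable sketch of that same offsetof pattern against an invented, cut-down header; the field names, offsets, and the byte-buffer "SRAM" are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Cut-down stand-in for a firmware header; field set is illustrative. */
struct fw_header {
        uint32_t Version;
        uint32_t DpmTable;
        uint32_t SoftRegisters;
        uint32_t FanTable;
};

/* Stand-in for reading one dword out of SMC SRAM at a byte offset. */
static uint32_t read_sram_dword(const uint8_t *sram, size_t offset)
{
        uint32_t v;

        memcpy(&v, sram + offset, sizeof(v));
        return v;
}

int main(void)
{
        uint8_t sram[sizeof(struct fw_header)];
        struct fw_header hdr = {
                .Version = 0x00010203,
                .DpmTable = 0x2000,
                .SoftRegisters = 0x3000,
                .FanTable = 0x4000,
        };

        memcpy(sram, &hdr, sizeof(hdr));

        /* Locate a table by offsetof() into the header, as the driver does. */
        printf("DpmTable starts at 0x%x\n",
               (unsigned)read_sram_dword(sram, offsetof(struct fw_header, DpmTable)));
        return 0;
}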
+*/ +int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, + uint32_t arbFreqSrc, uint32_t arbFreqDest) +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint32_t burst_time; + uint32_t mc_cg_config; + + switch (arbFreqSrc) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + break; + + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1); + burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1); + break; + + default: + return -1; + } + + switch (arbFreqDest) { + case MC_CG_ARB_FREQ_F0: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time); + break; + + case MC_CG_ARB_FREQ_F1: + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time); + break; + + default: + return -1; + } + + mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG); + mc_cg_config |= 0x0000000F; + cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config); + PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest); + + return 0; +} + +/** + * Initial switch from ARB F0->F1 + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr) +{ + return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +/* ---------------------------------------- ULV related functions ----------------------------------------------------*/ + + +static int iceland_reset_single_dpm_table( + struct pp_hwmgr *hwmgr, + struct iceland_single_dpm_table *dpm_table, + uint32_t count) +{ + uint32_t i; + if (!(count <= MAX_REGULAR_DPM_NUMBER)) + printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \ + table entries to exceed max number! 
\n"); + + dpm_table->count = count; + for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { + dpm_table->dpm_levels[i].enabled = 0; + } + + return 0; +} + +static void iceland_setup_pcie_table_entry( + struct iceland_single_dpm_table *dpm_table, + uint32_t index, uint32_t pcie_gen, + uint32_t pcie_lanes) +{ + dpm_table->dpm_levels[index].value = pcie_gen; + dpm_table->dpm_levels[index].param1 = pcie_lanes; + dpm_table->dpm_levels[index].enabled = 1; +} + +/* + * Set up the PCIe DPM table as follows: + * + * A = Performance State, Max, Gen Speed + * C = Performance State, Min, Gen Speed + * 1 = Performance State, Max, Lane # + * 3 = Performance State, Min, Lane # + * + * B = Power Saving State, Max, Gen Speed + * D = Power Saving State, Min, Gen Speed + * 2 = Power Saving State, Max, Lane # + * 4 = Power Saving State, Min, Lane # + * + * + * DPM Index Gen Speed Lane # + * 5 A 1 + * 4 B 2 + * 3 C 1 + * 2 D 2 + * 1 C 3 + * 0 D 4 + * + */ +static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels || + data->use_pcie_power_saving_levels), + "No pcie performance levels!", return -EINVAL); + + if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) { + data->pcie_gen_power_saving = data->pcie_gen_performance; + data->pcie_lane_power_saving = data->pcie_lane_performance; + } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) { + data->pcie_gen_performance = data->pcie_gen_power_saving; + data->pcie_lane_performance = data->pcie_lane_power_saving; + } + + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK); + + /* Hardcode Pcie Table */ + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1, + get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5, + get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen), + get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane)); + data->dpm_table.pcie_speed_table.count = 6; + + return 0; + +} + + +/* + * This function is to initalize all DPM state tables for SMU7 based on the dependency table. + * Dynamic state patching function will then trim these state tables to the allowed range based + * on the power policy or external client requests, such as UVD request, etc. 
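Editor's note: the hardcoded PCIe table above fills six levels from the capped minimum/maximum gen speed and the maximum lane count. A simplified standalone version of that fill follows, with assumed gen and lane cap values and a plain struct in place of iceland_single_dpm_table.

#include <stdint.h>
#include <stdio.h>

/* Simplified PCIe DPM level: gen speed, lane count, enable flag. */
struct pcie_level {
        uint32_t gen;
        uint32_t lanes;
        uint8_t  enabled;
};

struct pcie_table {
        uint32_t count;
        struct pcie_level levels[8];
};

static void set_entry(struct pcie_table *t, uint32_t idx,
                      uint32_t gen, uint32_t lanes)
{
        t->levels[idx].gen = gen;
        t->levels[idx].lanes = lanes;
        t->levels[idx].enabled = 1;
}

int main(void)
{
        /* Assume the platform caps resolve to gen1..gen3 and 16 lanes. */
        const uint32_t min_gen = 1, max_gen = 3, max_lanes = 16;
        struct pcie_table t = { 0 };

        /* Levels 0-1 take the minimum gen, levels 2-5 the maximum. */
        set_entry(&t, 0, min_gen, max_lanes);
        set_entry(&t, 1, min_gen, max_lanes);
        for (uint32_t i = 2; i <= 5; i++)
                set_entry(&t, i, max_gen, max_lanes);
        t.count = 6;

        for (uint32_t i = 0; i < t.count; i++)
                printf("level %u: gen%u x%u\n", (unsigned)i,
                       (unsigned)t.levels[i].gen, (unsigned)t.levels[i].lanes);
        return 0;
}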
+ */ +static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t i; + + struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table = + hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table = + hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_cac_leakage_table *std_voltage_table = + hwmgr->dyn_state.cac_leakage_table; + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1, + "SCLK dependency table has to have at least one entry. This table is mandatory", return -1); + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1, + "MCLK dependency table has to have at least one entry. This table is mandatory", return -1); + + /* clear the state table to reset everything to default */ + memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table)); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI); + iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD); + + PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, + "SCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Sclk DPM table based on allowed Sclk values*/ + data->dpm_table.sclk_table.count = 0; + + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != + allowed_vdd_sclk_table->entries[i].clk) { + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = + allowed_vdd_sclk_table->entries[i].clk; + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.count++; + } + } + + PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL, + "MCLK dependency table is missing. This table is mandatory", return -1); + /* Initialize Mclk DPM table based on allowed Mclk values */ + data->dpm_table.mclk_table.count = 0; + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value != + allowed_vdd_mclk_table->entries[i].clk) { + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = + allowed_vdd_mclk_table->entries[i].clk; + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.count++; + } + } + + /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values.
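Editor's note: the SCLK/MCLK loops above build DPM levels by copying each clock from the dependency table and skipping consecutive duplicates; the tables are assumed sorted, so only adjacent duplicates need to be checked. The same deduplication pattern in isolation, with example clock values:

#include <stdint.h>
#include <stdio.h>

/*
 * Collapse a (sorted) clock dependency list into DPM levels,
 * skipping repeated clock values exactly as the loops above do.
 */
static uint32_t build_levels(const uint32_t *clocks, uint32_t n,
                             uint32_t *levels)
{
        uint32_t count = 0;

        for (uint32_t i = 0; i < n; i++)
                if (i == 0 || levels[count - 1] != clocks[i])
                        levels[count++] = clocks[i];
        return count;
}

int main(void)
{
        const uint32_t dep_clk[] = { 30000, 30000, 60000, 80000, 80000, 90000 };
        uint32_t lvl[8];
        uint32_t n = build_levels(dep_clk, 6, lvl);

        for (uint32_t i = 0; i < n; i++)
                printf("sclk level %u: %u\n", (unsigned)i, (unsigned)lvl[i]);
        return 0;
}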
*/ + for (i = 0; i < allowed_vdd_sclk_table->count; i++) { + data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; + /* param1 is for corresponding std voltage */ + data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; + } + + data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; + allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* Initialize Vddci DPM table based on allow Mclk values */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; + } + + allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk; + + if (NULL != allowed_vdd_mclk_table) { + /* + * Initialize MVDD DPM table based on allow Mclk + * values + */ + for (i = 0; i < allowed_vdd_mclk_table->count; i++) { + data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; + data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; + } + data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; + } + + /* setup PCIE gen speed levels*/ + iceland_setup_default_pcie_tables(hwmgr); + + /* save a copy of the default DPM table*/ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table)); + + return 0; +} + +/** + * @brief PhwIceland_GetVoltageOrder + * Returns index of requested voltage record in lookup(table) + * @param hwmgr - pointer to hardware manager + * @param lookutab - lookup list to search in + * @param voltage - voltage to look for + * @return 0 on success + */ +uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table, + uint16_t voltage) +{ + uint8_t count = (uint8_t) (look_up_table->count); + uint8_t i; + + PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage equal or bigger than requested */ + if (look_up_table->entries[i].us_vdd >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i-1; +} + + +static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, uint16_t *hi, + uint16_t *lo) +{ + uint16_t v_index; + bool vol_found = false; + *hi = tab->value * VOLTAGE_SCALE; + *lo = tab->value * VOLTAGE_SCALE; + + /* SCLK/VDDC Dependency Table has to exist. */ + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk, + "The SCLK/VDDC Dependency Table does not exist.\n", + return -EINVAL); + + if (NULL == hwmgr->dyn_state.cac_leakage_table) { + pr_warning("CAC Leakage Table does not exist, using vddc.\n"); + return 0; + } + + /* + * Since voltage in the sclk/vddc dependency table is not + * necessarily in ascending order because of ELB voltage + * patching, loop through entire list to find exact voltage. 
+ */ + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n"); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + /* + * If voltage is not found in the first pass, loop again to + * find the best match, equal or higher value. + */ + if (!vol_found) { + for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) { + if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { + vol_found = true; + if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) { + *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; + } else { + pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table."); + *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; + *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); + } + break; + } + } + + if (!vol_found) + pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n"); + } + + return 0; +} + +static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr, + pp_atomctrl_voltage_table_entry *tab, + SMU71_Discrete_VoltageLevel *smc_voltage_tab) { + int result; + + + result = iceland_get_std_voltage_value_sidd(hwmgr, tab, + &smc_voltage_tab->StdVoltageHiSidd, + &smc_voltage_tab->StdVoltageLoSidd); + if (0 != result) { + smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE; + smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE; + } + + smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd); + + return 0; +} + +/** + * Vddc table preparation for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ +static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + unsigned int count; + int result; + + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->VddcLevelCount = data->vddc_voltage_table.count; + for (count = 0; count < table->VddcLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->vddc_voltage_table.entries[count], + &table->VddcLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDC voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) + table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; + else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + table->VddcLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +/** + * Vddci table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. + * @return 0 + */ +static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + uint32_t count; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->VddciLevelCount = data->vddci_voltage_table.count; + for (count = 0; count < table->VddciLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->vddci_voltage_table.entries[count], + &table->VddciLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) + table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; + else + table->VddciLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount); + + return 0; +} + +/** + * Mvdd table preparation for SMC. + * + * @param *hwmgr The address of the hardware manager. + * @param *table The SMC DPM table structure to be populated. 
+ * @return 0 + */ +static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + uint32_t count; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + table->MvddLevelCount = data->mvdd_voltage_table.count; + for (count = 0; count < table->MvddLevelCount; count++) { + result = iceland_populate_smc_voltage_table(hwmgr, + &data->mvdd_voltage_table.entries[count], + &table->MvddLevel[count]); + PP_ASSERT_WITH_CODE(0 == result, "do not populate SMC VDDCI voltage table", return -EINVAL); + + /* GPIO voltage control */ + if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) + table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; + else + table->MvddLevel[count].Smio = 0; + } + + CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount); + + return 0; +} + +/** + * Convert a voltage value in mv unit to VID number required by SMU firmware + */ +static uint8_t convert_to_vid(uint16_t vddc) +{ + return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25); +} + +int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint8_t * hi_vid = data->power_tune_table.BapmVddCVidHiSidd; + uint8_t * lo_vid = data->power_tune_table.BapmVddCVidLoSidd; + + PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table, + "The CAC Leakage table does not exist!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8, + "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL); + PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count, + "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) { + lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); + hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); + } + } else { + PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL); + } + + return 0; +} + +int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint8_t *vid = data->power_tune_table.VddCVid; + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8, + "There should never be more than 8 entries for VddcVid!!!", + return -EINVAL); + + for (i = 0; i < (int)data->vddc_voltage_table.count; i++) { + vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); + } + + return 0; +} + +/** + * Preparation of voltage tables for SMC. 
+ * + * @param hwmgr the address of the hardware manager + * @param table the SMC DPM table structure to be populated + * @return always 0 + */ + +int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result; + + result = iceland_populate_smc_vddc_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDC voltage table to SMC", return -1); + + result = iceland_populate_smc_vdd_ci_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate VDDCI voltage table to SMC", return -1); + + result = iceland_populate_smc_mvdd_table(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "can not populate MVDD voltage table to SMC", return -1); + + return 0; +} + + +/** + * Re-generate the DPM level mask value + * @param hwmgr the address of the hardware manager + */ +static uint32_t iceland_get_dpm_level_enable_mask_value( + struct iceland_single_dpm_table * dpm_table) +{ + uint32_t i; + uint32_t mask_value = 0; + + for (i = dpm_table->count; i > 0; i--) { + mask_value = mask_value << 1; + + if (dpm_table->dpm_levels[i-1].enabled) + mask_value |= 0x1; + else + mask_value &= 0xFFFFFFFE; + } + return mask_value; +} + +int iceland_populate_memory_timing_parameters( + struct pp_hwmgr *hwmgr, + uint32_t engine_clock, + uint32_t memory_clock, + struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs + ) +{ + uint32_t dramTiming; + uint32_t dramTiming2; + uint32_t burstTime; + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + engine_clock, memory_clock); + + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", return result); + + dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2); + arb_regs->McArbBurstTime = (uint8_t)burstTime; + + return 0; +} + +/** + * Setup parameters for the MC ARB. + * + * @param hwmgr the address of the powerplay hardware manager. + * @return always 0 + * This function is to be called from the SetPowerState table. + */ +int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result = 0; + SMU71_Discrete_MCArbDramTimingTable arb_regs; + uint32_t i, j; + + memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable)); + + for (i = 0; i < data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < data->dpm_table.mclk_table.count; j++) { + result = iceland_populate_memory_timing_parameters + (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value, + data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + + if (0 != result) { + break; + } + } + } + + if (0 == result) { + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU71_Discrete_MCArbDramTimingTable), + data->sram_end + ); + } + + return result; +} + +static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + uint32_t i; + + /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. 
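Editor's note: iceland_get_dpm_level_enable_mask_value(), defined above, packs the per-level enabled flags into a bitmask, bit i for DPM level i, scanning from the top level down. An equivalent standalone routine with a made-up set of enabled levels; the scan direction is only a stylistic choice, the result is the same as setting bit i for each enabled level.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Build the per-level enable mask: bit i corresponds to DPM level i. */
static uint32_t level_enable_mask(const bool *enabled, uint32_t count)
{
        uint32_t mask = 0;

        for (uint32_t i = count; i > 0; i--) {
                mask <<= 1;
                if (enabled[i - 1])
                        mask |= 0x1;
        }
        return mask;
}

int main(void)
{
        const bool levels[] = { true, true, false, true };      /* levels 0..3 */

        printf("0x%x\n", (unsigned)level_enable_mask(levels, 4)); /* 0xb */
        return 0;
}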
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = + (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = + 1; + table->LinkLevel[i].SPC = + (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = + PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = + PP_HOST_TO_SMC_UL(30); + } + + data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table, + uint32_t voltage) +{ + uint8_t count = (uint8_t) (voltage_table->count); + uint8_t i = 0; + + PP_ASSERT_WITH_CODE((NULL != voltage_table), + "Voltage Table empty.", return 0;); + PP_ASSERT_WITH_CODE((0 != count), + "Voltage Table empty.", return 0;); + + for (i = 0; i < count; i++) { + /* find first voltage bigger than requested */ + if (voltage_table->entries[i].value >= voltage) + return i; + } + + /* voltage is bigger than max voltage in the table */ + return i - 1; +} + +static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + +static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + return 0; +} + + +static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *tab) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) + tab->SVI2Enable |= VDDC_ON_SVI2; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) + tab->SVI2Enable |= VDDCI_ON_SVI2; + else + tab->MergedVddci = 1; + + if(ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) + tab->SVI2Enable |= MVDD_ON_SVI2; + + PP_ASSERT_WITH_CODE( tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) && + (tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL); + + return 0; +} + +static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table, + uint32_t clock, uint32_t *vol) +{ + uint32_t i = 0; + + /* clock - voltage dependency table is empty table */ + if (allowed_clock_voltage_table->count == 0) + return -EINVAL; + + for (i = 0; i < allowed_clock_voltage_table->count; i++) { + /* find first sclk bigger than request */ + if (allowed_clock_voltage_table->entries[i].clk >= clock) { + *vol = allowed_clock_voltage_table->entries[i].v; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *vol = allowed_clock_voltage_table->entries[i - 1].v; + + return 0; +} + +static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock, + bool strobe_mode) +{ + uint8_t mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) { + mc_para_index = 0x00; + } else if (memory_clock > 47500) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 2500); + } + } else { + if 
(memory_clock < 65000) { + mc_para_index = 0x00; + } else if (memory_clock > 135000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 60000) / 5000); + } + } + + return mc_para_index; +} + +static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock) +{ + uint8_t mc_para_index; + + if (memory_clock < 10000) { + mc_para_index = 0; + } else if (memory_clock >= 80000) { + mc_para_index = 0x0f; + } else { + mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1); + } + + return mc_para_index; +} + +static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t sclk, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (sclk < pl->entries[i].Sclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl, + uint32_t memory_clock, uint32_t *p_shed) +{ + unsigned int i; + + /* use the minimum phase shedding */ + *p_shed = 1; + + /* + * PPGen ensures the phase shedding limits table is sorted + * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk. + * VBIOS ensures the phase shedding masks table is sorted from + * least phases enabled (phase shedding on) to most phases + * enabled (phase shedding off). + */ + for (i = 0; i < pl->count; i++) { + if (memory_clock < pl->entries[i].Mclk) { + /* Enable phase shedding */ + *p_shed = i; + break; + } + } + + return 0; +} + +/** + * Populates the SMC MCLK structure using the provided memory clock + * + * @param hwmgr the address of the hardware manager + * @param memory_clock the memory clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_calculate_mclk_params( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *mclk, + bool strobe_mode, + bool dllStateOn + ) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL; + uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL; + uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL; + uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1; + uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2; + uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1; + uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2; + + pp_atomctrl_memory_clock_param mpll_param; + int result; + + result = atomctrl_get_memory_pll_dividers_si(hwmgr, + memory_clock, &mpll_param, strobe_mode); + PP_ASSERT_WITH_CODE(0 == result, + "Error retrieving Memory Clock Parameters from VBIOS.", return result); + + /* MPLL_FUNC_CNTL setup*/ + mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl); + + /* MPLL_FUNC_CNTL_1 setup*/ + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKF, 
mpll_param.mpll_fb_divider.cl_kf); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac); + mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1, + MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode); + + /* MPLL_AD_FUNC_CNTL setup*/ + mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl, + MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + + if (data->is_memory_GDDR5) { + /* MPLL_DQ_FUNC_CNTL setup*/ + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel); + mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl, + MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport)) { + /* + ************************************ + Fref = Reference Frequency + NF = Feedback divider ratio + NR = Reference divider ratio + Fnom = Nominal VCO output frequency = Fref * NF / NR + Fs = Spreading Rate + D = Percentage down-spread / 2 + Fint = Reference input frequency to PFD = Fref / NR + NS = Spreading rate divider ratio = int(Fint / (2 * Fs)) + CLKS = NS - 1 = ISS_STEP_NUM[11:0] + NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2) + CLKV = 65536 * NV = ISS_STEP_SIZE[25:0] + ************************************* + */ + pp_atomctrl_internal_ss_info ss_info; + uint32_t freq_nom; + uint32_t tmp; + uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr); + + /* for GDDR5 for all modes and DDR3 */ + if (1 == mpll_param.qdr) + freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider); + else + freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider); + + /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. 
reference_divider = 1*/ + tmp = (freq_nom / reference_clock); + tmp = tmp * tmp; + + if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) { + /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */ + /* ss.Info.speed_spectrum_rate -- in unit of khz */ + /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */ + /* = reference_clock * 5 / speed_spectrum_rate */ + uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate; + + /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */ + /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */ + uint32_t clkv = + (uint32_t)((((131 * ss_info.speed_spectrum_percentage * + ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom); + + mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv); + mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks); + } + } + + /* MCLK_PWRMGT_CNTL setup */ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn); + + + /* Save the result data to outpupt memory level structure */ + mclk->MclkFrequency = memory_clock; + mclk->MpllFuncCntl = mpll_func_cntl; + mclk->MpllFuncCntl_1 = mpll_func_cntl_1; + mclk->MpllFuncCntl_2 = mpll_func_cntl_2; + mclk->MpllAdFuncCntl = mpll_ad_func_cntl; + mclk->MpllDqFuncCntl = mpll_dq_func_cntl; + mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; + mclk->DllCntl = dll_cntl; + mclk->MpllSs1 = mpll_ss1; + mclk->MpllSs2 = mpll_ss2; + + return 0; +} + +static int iceland_populate_single_memory_level( + struct pp_hwmgr *hwmgr, + uint32_t memory_clock, + SMU71_Discrete_MemoryLevel *memory_level + ) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + int result = 0; + bool dllStateOn; + struct cgs_display_info info = {0}; + + + if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result); + } + + if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) { + memory_level->MinVddci = memory_level->MinVddc; + } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddci_dependency_on_mclk, + memory_clock, + &memory_level->MinVddci); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result); + } + + if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) { + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd); + PP_ASSERT_WITH_CODE((0 == result), + "can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result); + } + + memory_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table, + memory_clock, &memory_level->MinVddcPhases); + } + + memory_level->EnabledForThrottle = 1; + memory_level->EnabledForActivity = 1; + 
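
[Editor's note] The spread-spectrum block in iceland_calculate_mclk_params() above packs a fair amount of fixed-point arithmetic into two expressions. Below is a minimal standalone sketch of that CLKS/CLKV math, compiled outside the kernel; the input numbers are hypothetical and only exercise the integer expressions, they are not taken from any real VBIOS.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch of the CLKS/CLKV arithmetic used by
 * iceland_calculate_mclk_params().  All inputs are hypothetical sample
 * values; the point is only to make the integer math auditable.
 */
static void mpll_ss_example(uint32_t memory_clock,    /* 10 kHz units */
                            uint32_t reference_clock, /* 10 kHz units */
                            uint32_t post_divider,    /* log2 post divider */
                            int qdr,                  /* 1 for GDDR5 QDR mode */
                            uint32_t ss_rate,         /* spread rate, kHz */
                            uint32_t ss_percent)      /* down-spread, 0.01% units */
{
        uint32_t freq_nom, tmp, clks, clkv;

        /* Fnom = Fmclk * (4 in QDR mode, else 2) * 2^post_divider */
        freq_nom = memory_clock * (qdr ? 4 : 2) * (1u << post_divider);

        /* (Fnom / Fref)^2, with the reference divider taken as 1 */
        tmp = freq_nom / reference_clock;
        tmp = tmp * tmp;

        /* CLKS = Fref * 5 / Fs */
        clks = reference_clock * 5 / ss_rate;

        /* CLKV = (131 * spread * Fs / 100) * (Fnom/Fref)^2 / Fnom */
        clkv = (((131 * ss_percent * ss_rate) / 100) * tmp) / freq_nom;

        printf("freq_nom=%u clks=%u clkv=%u\n",
               (unsigned)freq_nom, (unsigned)clks, (unsigned)clkv);
}

int main(void)
{
        /* hypothetical numbers, chosen only to avoid 32-bit overflow */
        mpll_ss_example(96000, 2700, 1, 1, 400, 40);
        return 0;
}

The squaring of freq_nom/reference_clock is what the driver's long comment derives as ((Fnom/Fref * NR) ^ 2) with the reference divider NR fixed at 1.
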
memory_level->UpHyst = 0; + memory_level->DownHyst = 100; + memory_level->VoltageDownHyst = 0; + + /* Indicates maximum activity level for this performance level.*/ + memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target; + memory_level->StutterEnable = 0; + memory_level->StrobeEnable = 0; + memory_level->EdcReadEnable = 0; + memory_level->EdcWriteEnable = 0; + memory_level->RttEnable = 0; + + /* default set to low watermark. Highest level will be set to high later.*/ + memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + cgs_get_active_displays_info(hwmgr->device, &info); + data->display_timing.num_existing_displays = info.display_count; + + //if ((data->mclk_stutter_mode_threshold != 0) && + // (memory_clock <= data->mclk_stutter_mode_threshold) && + // (data->is_uvd_enabled == 0) + // && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) + // && (data->display_timing.num_existing_displays <= 2) + // && (data->display_timing.num_existing_displays != 0)) + // memory_level->StutterEnable = 1; + + /* decide strobe mode*/ + memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) && + (memory_clock <= data->mclk_strobe_mode_threshold); + + /* decide EDC mode and memory clock ratio*/ + if (data->is_memory_GDDR5) { + memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock, + memory_level->StrobeEnable); + + if ((data->mclk_edc_enable_threshold != 0) && + (memory_clock > data->mclk_edc_enable_threshold)) { + memory_level->EdcReadEnable = 1; + } + + if ((data->mclk_edc_wr_enable_threshold != 0) && + (memory_clock > data->mclk_edc_wr_enable_threshold)) { + memory_level->EdcWriteEnable = 1; + } + + if (memory_level->StrobeEnable) { + if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >= + ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0; + } else { + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0; + } + + } else { + dllStateOn = data->dll_defaule_on; + } + } else { + memory_level->StrobeRatio = + iceland_get_ddr3_mclk_frequency_ratio(memory_clock); + dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 
1 : 0; + } + + result = iceland_calculate_mclk_params(hwmgr, + memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn); + + if (0 == result) { + memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases); + memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE); + memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE); + /* MCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency); + /* Indicates maximum activity level for this performance level.*/ + CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1); + CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2); + } + + return result; +} + +/** + * Populates the SMC MVDD structure using the provided memory clock. + * + * @param hwmgr the address of the hardware manager + * @param mclk the MCLK value to be used in the decision if MVDD should be high or low. + * @param voltage the SMC VOLTAGE structure to be populated + */ +int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) { + if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { + /* Always round to higher voltage. */ + voltage->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + + PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count, + "MVDD Voltage is outside the supported range.", return -1); + + } else { + return -1; + } + + return 0; +} + + +static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU71_Discrete_DpmTable *table) +{ + int result = 0; + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + SMU71_Discrete_VoltageLevel voltage_level; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2; + uint32_t dll_cntl = data->clock_registers.vDLL_CNTL; + uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL; + + /* The ACPI state should not do DPM on DC (or ever).*/ + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (data->acpi_vddc) + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE); + else + table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE); + + table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 
0 : 1; + + /* assign zero for now*/ + table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr); + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, + table->ACPILevel.SclkFrequency, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* divider ID for required SCLK*/ + table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider; + table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + table->ACPILevel.DeepSleepDivId = 0; + + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_RESET, 1); + spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, + CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4); + + table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; + table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; + table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + + /* For various features to be enabled/disabled while this level is active.*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + /* SCLK frequency in units of 10KHz*/ + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; + table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; + + /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/ + + if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level)) + table->MemoryACPILevel.MinMvdd = + PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE); + else + table->MemoryACPILevel.MinMvdd = 0; + + /* Force reset on DLL*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1); + + /* Disable DLL in ACPIState*/ + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0); + mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl, + MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0); + + /* Enable DLL bypass signal*/ + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK0_BYPASS, 0); + dll_cntl = PHM_SET_FIELD(dll_cntl, + DLL_CNTL, MRDCK1_BYPASS, 0); + + table->MemoryACPILevel.DllCntl = + PP_HOST_TO_SMC_UL(dll_cntl); + table->MemoryACPILevel.MclkPwrmgtCntl = + PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl); + table->MemoryACPILevel.MpllAdFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL); + table->MemoryACPILevel.MpllDqFuncCntl = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl = + 
PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL); + table->MemoryACPILevel.MpllFuncCntl_1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1); + table->MemoryACPILevel.MpllFuncCntl_2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2); + table->MemoryACPILevel.MpllSs1 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1); + table->MemoryACPILevel.MpllSs2 = + PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2); + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + /* Indicates maximum activity level for this performance level.*/ + table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target); + + table->MemoryACPILevel.StutterEnable = 0; + table->MemoryACPILevel.StrobeEnable = 0; + table->MemoryACPILevel.EdcReadEnable = 0; + table->MemoryACPILevel.EdcWriteEnable = 0; + table->MemoryACPILevel.RttEnable = 0; + + return result; +} + +static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level) +{ + int result = 0; + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (value == table->dpm_levels[i].value) { + *boot_level = i; + result = 0; + } + } + return result; +} + +/** + * Calculates the SCLK dividers using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk) +{ + const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_clock_dividers_vi dividers; + uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL; + uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3; + uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4; + uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM; + uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t reference_clock; + uint32_t reference_divider; + uint32_t fbdiv; + int result; + + /* get the engine clock dividers for this clock value*/ + result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, ÷rs); + + PP_ASSERT_WITH_CODE(result == 0, + "Error retrieving Engine Clock dividers from VBIOS.", return result); + + /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/ + reference_clock = atomctrl_get_reference_clock(hwmgr); + + reference_divider = 1 + dividers.uc_pll_ref_div; + + /* low 14 bits is fraction and high 12 bits is divider*/ + fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF; + + /* SPLL_FUNC_CNTL setup*/ + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div); + spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, + CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div); + + /* SPLL_FUNC_CNTL_3 setup*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv); + + /* set to use fractional accumulation*/ + spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, + CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport)) { + 
pp_atomctrl_internal_ss_info ss_info; + + uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div; + if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) { + /* + * ss_info.speed_spectrum_percentage -- in unit of 0.01% + * ss_info.speed_spectrum_rate -- in unit of khz + */ + /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */ + uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate); + + /* clkv = 2 * D * fbdiv / NS */ + uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000); + + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS); + cg_spll_spread_spectrum = + PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1); + cg_spll_spread_spectrum_2 = + PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV); + } + } + + sclk->SclkFrequency = engine_clock; + sclk->CgSpllFuncCntl3 = spll_func_cntl_3; + sclk->CgSpllFuncCntl4 = spll_func_cntl_4; + sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; + sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; + sclk->SclkDid = (uint8_t)dividers.pll_post_divider; + + return 0; +} + +static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, uint32_t min_engine_clock_in_sr) +{ + uint32_t i, temp; + uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ? + min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK; + + PP_ASSERT_WITH_CODE((engine_clock >= min), + "Engine clock can't satisfy stutter requirement!", return 0); + + for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) { + temp = engine_clock / (1 << i); + + if(temp >= min || i == 0) + break; + } + return (uint8_t)i; +} + +/** + * Populates single SMC SCLK structure using the provided engine clock + * + * @param hwmgr the address of the hardware manager + * @param engine_clock the engine clock to use to populate the structure + * @param sclk the SMC SCLK structure to be populated + */ +static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t engine_clock, uint16_t sclk_activity_level_threshold, + SMU71_Discrete_GraphicsLevel *graphic_level) +{ + int result; + uint32_t threshold; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level); + + + /* populate graphics levels*/ + result = iceland_get_dependecy_volt_by_clk(hwmgr, + hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc); + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for VDDC engine clock dependency table", return result); + + /* SCLK frequency in units of 10KHz*/ + graphic_level->SclkFrequency = engine_clock; + + /* + * Minimum VDDC phases required to support this level, it + * should get from dependence table. + */ + graphic_level->MinVddcPhases = 1; + + if (data->vddc_phase_shed_control) { + iceland_populate_phase_value_based_on_sclk(hwmgr, + hwmgr->dyn_state.vddc_phase_shed_limits_table, + engine_clock, + &graphic_level->MinVddcPhases); + } + + /* Indicates maximum activity level for this performance level. 
50% for now*/ + graphic_level->ActivityLevel = sclk_activity_level_threshold; + + graphic_level->CcPwrDynRm = 0; + graphic_level->CcPwrDynRm1 = 0; + /* this level can be used if activity is high enough.*/ + graphic_level->EnabledForActivity = 1; + /* this level can be used for throttling.*/ + graphic_level->EnabledForThrottle = 1; + graphic_level->UpHyst = 0; + graphic_level->DownHyst = 100; + graphic_level->VoltageDownHyst = 0; + graphic_level->PowerThrottle = 0; + + threshold = engine_clock * data->fast_watermark_threshold / 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + graphic_level->DeepSleepDivId = + iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock, + data->display_timing.min_clock_insr); + } + + /* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/ + graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + if (0 == result) { + graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE); + /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/ + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1); + } + + return result; +} + +/** + * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states + * + * @param hwmgr the address of the hardware manager + */ +static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + int result = 0; + uint32_t level_array_adress = data->dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, GraphicsLevel); + + uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS; + SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel; + uint32_t i; + uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0; + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + result = iceland_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + (uint16_t)data->activity_target[i], + &(data->smc_state_table.GraphicsLevel[i])); + if (0 != result) + return result; + + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. 
*/ + if (i > 1) + data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; + } + + /* set highest level watermark to high */ + if (dpm_table->sclk_table.count > 1) + data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + data->dpm_level_enable_mask.sclk_dpm_enable_mask = + iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (highest_pcie_level_enabled + 1))) != 0) { + highest_pcie_level_enabled++; + } + + while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0) { + lowest_pcie_level_enabled++; + } + + while ((count < highest_pcie_level_enabled) && + ((data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) { + count++; + } + + mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ? + (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled; + + /* set pcieDpmLevel to highest_pcie_level_enabled*/ + for (i = 2; i < dpm_table->sclk_table.count; i++) { + data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled; + } + + /* set pcieDpmLevel to lowest_pcie_level_enabled*/ + data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled*/ + data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled; + + /* level count will send to smc once at init smc table and never change*/ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + + if (0 != result) + return result; + + return 0; +} + +/** + * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states + * + * @param hwmgr the address of the hardware manager + */ + +static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + struct iceland_dpm_table *dpm_table = &data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel); + uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY; + SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel; + uint32_t i; + + memset(levels, 0x00, level_array_size); + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", return -1); + result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value, + &(data->smc_state_table.MemoryLevel[i])); + if (0 != result) { + return result; + } + } + + /* Only enable level 0 for now.*/ + data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; + + /* + * in order to prevent MC activity from stutter mode to push DPM up. + * the UVD change complements this by putting the MCLK in a higher state + * by default such that we are not effected by up threshold or and MCLK DPM latency. 
+ */ + data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F; + CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel); + + data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count; + data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + /* set highest level watermark to high*/ + data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change*/ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end); + + if (0 != result) { + return result; + } + + return 0; +} + +struct ICELAND_DLL_SPEED_SETTING +{ + uint16_t Min; /* Minimum Data Rate*/ + uint16_t Max; /* Maximum Data Rate*/ + uint32_t dll_speed; /* The desired DLL_SPEED setting*/ +}; + +static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate) +{ + int result = 0; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint32_t voltage_response_time, ulv_voltage; + + pstate->CcPwrDynRm = 0; + pstate->CcPwrDynRm1 = 0; + + //backbiasResponseTime is use for ULV state voltage value. + result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage); + PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;); + + if(!ulv_voltage) { + data->ulv.ulv_supported = false; + return 0; + } + + if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { + pstate->VddcOffset = 0; + } + else { + /* used in SMIO Mode. not implemented for now. this is backup only for CI. */ + pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); + } + } else { + /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */ + if(ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) { + pstate->VddcOffsetVid = 0; + } else { + /* used in SVI2 Mode */ + pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + } + } + + /* used in SVI2 Mode to shed phase */ + pstate->VddcPhase = (data->vddc_phase_shed_control) ? 
0 : 1; + + if (0 == result) { + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset); + } + + return result; +} + +static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv) +{ + return iceland_populate_ulv_level(hwmgr, ulv); +} + +static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint8_t count, level; + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk + >= data->vbios_boot_state.sclk_bootup_value) { + data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count); + + for (level = 0; level < count; level++) { + if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk + >= data->vbios_boot_state.mclk_bootup_value) { + data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +/** + * Initializes the SMC table and uploads it + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pInput the pointer to input data (PowerState) + * @return always 0 + */ +int iceland_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *table = &(data->smc_state_table); + const struct phw_iceland_ulv_parm *ulv = &(data->ulv); + + result = iceland_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to setup default DPM tables!", return result;); + memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table)); + + if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) { + iceland_populate_smc_voltage_tables(hwmgr, table); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + } + + if (data->is_memory_GDDR5) { + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + } + + if (ulv->ulv_supported) { + result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ULV state!", return result;); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter); + } + + result = iceland_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Link Level!", return result;); + + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Graphics Level!", return result;); + + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize Memory Level!", return result;); + + result = iceland_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACPI Level!", return result;); + + result = iceland_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize VCE Level!", return result;); + + result = iceland_populate_smc_acp_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize ACP Level!", return result;); + + result = 
iceland_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize SAMU Level!", return result;); + + /* + * Since only the initial state is completely set up at this + * point (the other states are just copies of the boot state) + * we only need to populate the ARB settings for the initial + * state. + */ + result = iceland_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to Write ARB settings for the initial state.", return result;); + + result = iceland_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize UVD Level!", return result;); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = iceland_find_boot_level(&(data->dpm_table.sclk_table), + data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(data->smc_state_table.GraphicsBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot engine clock value in dependency table.\n"); + + result = iceland_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(data->smc_state_table.MemoryBootLevel)); + + if (result) + pr_warning("VBIOS did not find boot memory clock value in dependency table.\n"); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value; + if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) { + table->BootVddci = table->BootVddc; + } + else { + table->BootVddci = data->vbios_boot_state.vddci_bootup_value; + } + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value; + + result = iceland_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result); + + result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result); + + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + (data->thermal_temp_setting.temperature_high * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->TemperatureLimitLow = + (data->thermal_temp_setting.temperature_low * + ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + table->PCIeBootLinkLevel = 0; + table->PCIeGenInterval = 1; + + result = iceland_populate_smc_svi2_config(hwmgr, table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to populate SVI2 setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE); + table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * 
VOLTAGE_SCALE); + table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start + + offsetof(SMU71_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController), + data->sram_end); + + PP_ASSERT_WITH_CODE(0 == result, + "Failed to upload dpm data to SMC memory!", return result); + + /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */ + result = iceland_copy_bytes_to_smc(hwmgr->smumgr, + data->ulv_settings_start, + (uint8_t *)&(data->ulv_setting), + sizeof(SMU71_Discrete_Ulv), + data->sram_end); + +#if 0 + /* Notify SMC to follow new GPIO scheme */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) { + if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + } +#endif + + return result; +} + +int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table) +{ + const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + uint32_t i, j; + + for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) { + if (data->iceland_mc_reg_table.validflag & 1<<j) { + PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE, + "Index of mc_reg_table->address[] array out of boundary", return -1); + mc_reg_table->address[i].s0 = + PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + + mc_reg_table->last = (uint8_t)i; + + return 0; +} + +/* convert register values from driver to SMC format */ +void iceland_convert_mc_registers( + const phw_iceland_mc_reg_entry * pEntry, + SMU71_Discrete_MCRegisterSet *pData, + uint32_t numEntries, uint32_t validflag) +{ + uint32_t i, j; + + for (i = 0, j = 0; j < numEntries; j++) { + if (validflag & 1<<j) { + pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]); + i++; + } + } +} + +/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */ +int iceland_convert_mc_reg_table_entry_to_smc( + struct pp_hwmgr *hwmgr, + const uint32_t memory_clock, + SMU71_Discrete_MCRegisterSet *mc_reg_table_data + ) +{ + const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t i = 0; + + for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) { + if (memory_clock <= + data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) { + break; + } + } + + if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0)) + --i; + + iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag); + + return 0; +} + +int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr, + SMU71_Discrete_MCRegisters *mc_reg_table) +{ + int result = 0; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + int res; + uint32_t i; + + for (i = 0; i < data->dpm_table.mclk_table.count; i++) { + res = iceland_convert_mc_reg_table_entry_to_smc( + hwmgr, + data->dpm_table.mclk_table.dpm_levels[i].value, + &mc_reg_table->data[i] + ); + + if (0 != res) + result = res; + } + + return result; +} + +int 
iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters)); + result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table)); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for the MC register addresses!", return result;); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table); + PP_ASSERT_WITH_CODE(0 == result, + "Failed to initialize MCRegTable for driver state!", return result;); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start, + (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end); +} + +int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) +{ + PPSMC_Msg msg = has_display? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; + + return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; +} + +int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr) +{ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0); + + return 0; +} + +int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable SCLK dpm */ + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Enable)), + "Failed to enable SCLK DPM during DPM Start Function!", + return -1); + } + + /* enable MCLK dpm */ + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Enable)), + "Failed to enable MCLK DPM during DPM Start Function!", + return -1); + + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x100005);/*Read */ + + udelay(10); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixLCAC_CPL_CNTL, 0x500005);/* write */ + + } + + return 0; +} + +int iceland_start_dpm(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* enable general power management */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1); + /* enable sclk deep sleep */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1); + + /* prepare for PCIE DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0); + + PP_ASSERT_WITH_CODE( + (0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Enable)), + "Failed to enable voltage DPM during DPM Start Function!", + return -1); + + if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) { + PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1); + } + + /* enable PCIE dpm */ + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (0 
== smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Enable)), + "Failed to enable pcie DPM during DPM Start Function!", + return -1 + ); + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_Falcon_QuickTransition)) { + smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_EnableACDCGPIOInterrupt); + } + + return 0; +} + +static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr, + uint32_t sources) +{ + bool protection; + enum DPM_EVENT_SRC src; + + switch (sources) { + default: + printk(KERN_ERR "Unknown throttling event sources."); + /* fall through */ + case 0: + protection = false; + /* src is unused */ + break; + case (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL; + break; + case (1 << PHM_AutoThrottleSource_External): + protection = true; + src = DPM_EVENT_SRC_EXTERNAL; + break; + case (1 << PHM_AutoThrottleSource_External) | + (1 << PHM_AutoThrottleSource_Thermal): + protection = true; + src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL; + break; + } + /* Order matters - don't enable thermal protection for the wrong source. */ + if (protection) { + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, + DPM_EVENT_SRC, src); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, + !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)); + } else + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + THERMAL_PROTECTION_DIS, 1); +} + +static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (!(data->active_auto_throttle_sources & (1 << source))) { + data->active_auto_throttle_sources |= 1 << source; + iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + +static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr) +{ + int ret = 0; + + if (!iceland_is_smc_ram_running(hwmgr->smumgr)) + ret = iceland_smu_start_smc(hwmgr->smumgr); + + return ret; +} + +/** +* Programs the Deep Sleep registers +* +* @param pHwMgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data (PhwEvergreen_DisplayConfiguration) +* @param pOutput the pointer to output data (unused) +* @param pStorage the pointer to temporary storage (unused) +* @param Result the last failure code (unused) +* @return always 0 +*/ +static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_ON) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to enable Master Deep Sleep switch failed!", + return -EINVAL); + } else { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -EINVAL); + } + + return 0; +} + +static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + if (cf_iceland_voltage_control(hwmgr)) { + tmp_result = iceland_enable_voltage_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable voltage control!", return tmp_result); + + tmp_result = iceland_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to contruct voltage tables!", return tmp_result); + } + + tmp_result = iceland_initialize_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize MC reg table!", return tmp_result); + + tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program static screen threshold parameters!", return tmp_result); + + tmp_result = iceland_enable_display_gap(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable display gap!", return tmp_result); + + tmp_result = iceland_program_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to program voting clients!", return tmp_result); + + tmp_result = iceland_upload_firmware(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to upload firmware header!", return tmp_result); + + tmp_result = iceland_process_firmware_header(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to process firmware header!", return tmp_result); + + tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", return tmp_result); + + tmp_result = iceland_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize SMC table!", return tmp_result); + + tmp_result = iceland_populate_initial_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate initialize MC Reg table!", return tmp_result); + + tmp_result = iceland_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to populate PM fuses!", return tmp_result); + + /* start SMC */ + tmp_result = iceland_tf_start_smc(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start SMC!", return tmp_result); + + /* enable SCLK control */ + tmp_result = iceland_enable_sclk_control(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SCLK control!", return tmp_result); + + tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable deep sleep!", return tmp_result); + + /* enable DPM */ + tmp_result = iceland_start_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to start DPM!", return tmp_result); + 
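
[Editor's note] The source-to-event mapping in iceland_set_dpm_event_sources() above reduces to a small pure function. The sketch below restates it outside the kernel; the enum constants are stand-ins for PHM_AutoThrottleSource_* and DPM_EVENT_SRC_*, and only the bitmask-to-(protection, source) mapping is taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in constants; only the mapping below mirrors the driver. */
enum throttle_source { SRC_THERMAL = 0, SRC_EXTERNAL = 1 };
enum event_src { EVT_NONE, EVT_DIGITAL, EVT_EXTERNAL, EVT_DIGITAL_OR_EXTERNAL };

static enum event_src decode_throttle_sources(unsigned int sources, bool *protection)
{
        switch (sources) {
        case 1u << SRC_THERMAL:
                *protection = true;
                return EVT_DIGITAL;
        case 1u << SRC_EXTERNAL:
                *protection = true;
                return EVT_EXTERNAL;
        case (1u << SRC_THERMAL) | (1u << SRC_EXTERNAL):
                *protection = true;
                return EVT_DIGITAL_OR_EXTERNAL;
        case 0:
        default:
                /* empty or unknown set: thermal protection stays disabled */
                *protection = false;
                return EVT_NONE;
        }
}

int main(void)
{
        bool prot;
        enum event_src src = decode_throttle_sources(1u << SRC_THERMAL, &prot);

        printf("protection=%d src=%d\n", prot, src);
        return 0;
}

As in the driver, the only case that leaves thermal protection disabled is an empty (or unrecognized) source set; any recognized source enables protection and selects the matching event source register value.
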
+ tmp_result = iceland_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable SMC CAC!", return tmp_result); + + tmp_result = iceland_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable power containment!", return tmp_result); + + tmp_result = iceland_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to power control set level!", result = tmp_result); + + tmp_result = iceland_enable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to enable thermal auto throttle!", result = tmp_result); + + return result; +} + +static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + return phm_hwmgr_backend_fini(hwmgr); +} + +static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phw_iceland_ulv_parm *ulv; + + ulv = &data->ulv; + ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT; + data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0; + data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1; + data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2; + data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3; + data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4; + data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5; + data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6; + data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7; + + data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT; + data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ABM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_NonABMSupportInPPLib); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicACTiming); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMemoryTransition); + + iceland_initialize_power_tune_defaults(hwmgr); + + data->mclk_strobe_mode_threshold = 40000; + data->mclk_stutter_mode_threshold = 30000; + data->mclk_edc_enable_threshold = 40000; + data->mclk_edc_wr_enable_threshold = 40000; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMCLS); + + data->pcie_gen_performance.max = PP_PCIEGen1; + data->pcie_gen_performance.min = PP_PCIEGen3; + data->pcie_gen_power_saving.max = PP_PCIEGen1; + data->pcie_gen_power_saving.min = PP_PCIEGen3; + + data->pcie_lane_performance.max = 0; + data->pcie_lane_performance.min = 16; + data->pcie_lane_power_saving.max = 0; + data->pcie_lane_power_saving.min = 16; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification); +} + +static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + uint16_t virtual_voltage_id; + uint16_t vddc = 0; + uint16_t i; + + /* the count indicates actual number of entries */ + data->vddc_leakage.count = 0; + data->vddci_leakage.count = 0; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) { + pr_err("Iceland should always support EVV\n"); + return -EINVAL; + } + + /* retrieve voltage for leakage ID (0xff01 + i) */ + for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) { + virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + 
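
[Editor's note] The EVV loop here and the iceland_patch_with_vddc_leakage() helper further down implement a simple substitution: voltages stored as virtual leakage IDs (0xff01..0xff08 per the comments) are replaced by the measured voltage recorded for that ID. A minimal standalone model of that substitution, with made-up table contents, looks like this:

#include <stdint.h>
#include <stdio.h>

#define VIRTUAL_VOLTAGE_ID0 0xff01  /* base of the virtual leakage ID range */

struct leakage_table {
        uint16_t count;
        uint16_t leakage_id[8];
        uint16_t actual_voltage[8]; /* mV */
};

/* Replace a virtual leakage ID with the real voltage measured for it. */
static void patch_with_leakage(const struct leakage_table *t, uint32_t *vddc)
{
        uint16_t i;

        for (i = 0; i < t->count; i++) {
                if (t->leakage_id[i] == *vddc) {
                        /* anything at or above 2 V would be bogus for VDDC */
                        if (t->actual_voltage[i] >= 2000)
                                fprintf(stderr, "suspicious VDDC value\n");
                        *vddc = t->actual_voltage[i];
                        return;
                }
        }

        if (*vddc >= VIRTUAL_VOLTAGE_ID0)
                fprintf(stderr, "leakage ID 0x%x not patched\n", (unsigned)*vddc);
}

int main(void)
{
        /* made-up sample entry: ID 0xff01 measured at 1150 mV */
        struct leakage_table t = {
                .count = 1,
                .leakage_id = { 0xff01 },
                .actual_voltage = { 1150 },
        };
        uint32_t vddc = 0xff01;

        patch_with_leakage(&t, &vddc);
        printf("patched vddc = %u mV\n", (unsigned)vddc);
        return 0;
}

The same substitution is applied across all of the clock-voltage dependency tables by iceland_patch_dependency_tables_with_leakage() later in this file.
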
PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)), + "Error retrieving EVV voltage value!\n", continue); + + if (vddc >= 2000) + pr_warning("Invalid VDDC value!\n"); + + if (vddc != 0 && vddc != virtual_voltage_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; + data->vddc_leakage.count++; + } + } + + return 0; +} + +static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddc) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. + */ + if (leakage_table->leakage_id[leakage_index] == *vddc) { + /* + * Need to make sure vddc is less than 2v or + * else, it could burn the ASIC. + */ + if (leakage_table->actual_voltage[leakage_index] >= 2000) + pr_warning("Invalid VDDC value!\n"); + *vddc = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr, + uint32_t *vddci) +{ + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t leakage_index; + struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { + /* + * If this voltage matches a leakage voltage ID, patch + * with actual leakage voltage. 
+ */ + if (leakage_table->leakage_id[leakage_index] == *vddci) { + *vddci = leakage_table->actual_voltage[leakage_index]; + /* we found leakage voltage */ + break; + } + } + + if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0) + pr_warning("Voltage value looks like a Leakage ID but it's not patched\n"); +} + +static int iceland_patch_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddci(struct pp_hwmgr *hwmgr, + struct phm_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr, + struct phm_vce_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + + +static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr, + struct phm_uvd_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, + struct phm_phase_shedding_limits_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage); + + return 0; +} + +static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr, + struct phm_samu_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr, + struct phm_acp_clock_voltage_dependency_table *tab) +{ + uint16_t i; + + if (tab) + for (i = 0; i < tab->count; i++) + iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v); + + return 0; +} + +static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *tab) +{ + if (tab) { + iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc); + iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci); + } + + return 0; +} + +static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) +{ + uint32_t i; + uint32_t vddc; + + if (tab) { + for (i = 0; i < tab->count; i++) { + vddc = (uint32_t)(tab->entries[i].Vddc); + iceland_patch_with_vddc_leakage(hwmgr, &vddc); + tab->entries[i].Vddc = (uint16_t)vddc; + } + } + + return 0; +} + +static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) +{ + int tmp; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_samu_vddc(hwmgr, 
hwmgr->dyn_state.samu_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); + if(tmp) + return -EINVAL; + + tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); + if(tmp) + return -EINVAL; + + return 0; +} + +static int iceland_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, + "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, + "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, + "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, + "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL); + + data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v; + data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; + + if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { + data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v; + data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; + } + + if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1) + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; + + return 0; +} + +static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) +{ + uint32_t table_size; + struct phm_clock_voltage_dependency_table *table_clk_vlt; + + hwmgr->dyn_state.mclk_sclk_ratio = 4; + hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */ + hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */ + + /* initialize vddc_dep_on_dal_pwrl table */ + table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); + table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + + if (NULL == table_clk_vlt) { + pr_err("[ powerplay 
] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); + return -ENOMEM; + } else { + table_clk_vlt->count = 4; + table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; + table_clk_vlt->entries[0].v = 0; + table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; + table_clk_vlt->entries[1].v = 720; + table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; + table_clk_vlt->entries[2].v = 810; + table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; + table_clk_vlt->entries[3].v = 900; + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; + } + + return 0; +} + +/** + * Initializes the Volcanic Islands Hardware Manager + * + * @param hwmgr the address of the powerplay hardware manager. + * @return 1 if success; otherwise appropriate error code. + */ +static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + SMU71_Discrete_DpmTable *table = NULL; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; + bool stay_in_boot; + struct phw_iceland_ulv_parm *ulv; + struct cgs_system_info sys_info = {0}; + + PP_ASSERT_WITH_CODE((NULL != hwmgr), + "Invalid Parameter!", return -EINVAL;); + + data->dll_defaule_on = 0; + data->sram_end = SMC_RAM_END; + + data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT; + data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT; + + data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT; + + data->sclk_dpm_key_disabled = 0; + data->mclk_dpm_key_disabled = 0; + data->pcie_dpm_key_disabled = 0; + data->pcc_monitor_enabled = 0; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + data->gpio_debug = 0; + data->engine_clock_data = 0; + data->memory_clock_data = 0; + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleepAboveLow); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + /* Initializes DPM default values. */ + iceland_initialize_dpm_defaults(hwmgr); + + /* Enable Platform EVV support. */ + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EVV); + + /* Get leakage voltage based on leakage ID. */ + result = iceland_get_evv_voltage(hwmgr); + if (result) + goto failed; + + /** + * Patch our voltage dependency table with actual leakage + * voltage. We need to perform leakage translation before it's + * used by other functions such as + * iceland_set_hwmgr_variables_based_on_pptable. + */ + result = iceland_patch_dependency_tables_with_leakage(hwmgr); + if (result) + goto failed; + + /* Parse pptable data read from VBIOS. 
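+ * iceland_set_private_var_based_on_pptale() below caches the min/max
+ * VDDC (and VDDCI, when a VDDCI table exists) from the SCLK/MCLK
+ * dependency tables and derives max_clock_voltage_on_ac from their
+ * highest entries, so leakage patching must already have run.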
*/ + result = iceland_set_private_var_based_on_pptale(hwmgr); + if (result) + goto failed; + + /* ULV support */ + ulv = &(data->ulv); + ulv->ulv_supported = 1; + + /* Initalize Dynamic State Adjustment Rule Settings*/ + result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr); + if (result) { + pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n"); + goto failed; + } + + data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE; + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE; + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE; + + /* + * Hardcode thermal temperature settings for now, these will + * be overwritten if a custom policy exists. + */ + data->thermal_temp_setting.temperature_low = 99500; + data->thermal_temp_setting.temperature_high = 100000; + data->thermal_temp_setting.temperature_shutdown = 104000; + data->uvd_enabled = false; + + table = &data->smc_state_table; + + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment)) { + table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } else { + table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment)) { + table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } else { + table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* + * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak. + * Current Control feature is enabled and we should program + * PCC HW register + */ + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, + &gpio_pin_assignment)) { + uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL); + + switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { + case 0: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); + break; + case 1: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); + break; + case 2: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); + break; + case 3: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); + break; + case 4: + temp_reg = PHM_SET_FIELD(temp_reg, + CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); + break; + default: + pr_warning("[ powerplay ] Failed to setup PCC HW register! 
Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n"); + break; + } + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCNB_PWRMGT_CNTL, temp_reg); + } + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMU7); + + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2)) + data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_SVID2)) + data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl)) { + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO; + else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, + VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_SVID2)) + data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2; + } + + if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableMVDDControl); + + data->vddc_phase_shed_control = false; + + stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StayInBootState); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ActivityReporting); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MemorySpreadSpectrumSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EngineSpreadSpectrumSupport); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPCIEGen2Support); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisablePowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalAutoThrottling); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableLSClockGating); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SamuDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AcpDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6inACSupport); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnablePlatformPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PauseMMSessions); + + 
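+ /*
+ * The remaining caps are static platform defaults for Iceland; later
+ * code in this file reads them back through phm_cap_enabled(), e.g.
+ * phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ * PHM_PlatformCaps_OD6PlusinACSupport).
+ */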
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PauseMMSessions); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_GFXClockGatingManagedInCAIL); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_IcelandULPSSWWorkAround); + + + /* iceland doesn't support UVD and VCE */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + data->is_tlu_enabled = false; + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + ICELAND_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; + else + data->pcie_gen_cap = (uint32_t)sys_info.value; + if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + data->pcie_spc_cap = 20; + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (result) + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; + else + data->pcie_lane_cap = (uint32_t)sys_info.value; + } else { + /* Ignore return value in here, we are cleaning up a mess. */ + iceland_hwmgr_backend_fini(hwmgr); + } + + return 0; +failed: + return result; +} + +static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr) +{ + int result; + unsigned long ret = 0; + + result = pp_tables_get_num_of_entries(hwmgr, &ret); + + return result ? 
0 : ret; +} + +static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic); + +struct iceland_power_state *cast_phw_iceland_power_state( + struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (struct iceland_power_state *)hw_ps; +} + +static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps) +{ + struct iceland_power_state *iceland_ps = + cast_phw_iceland_power_state(&prequest_ps->hardware); + + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + int32_t count; + int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + + data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); + + PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2, + "VI should always have 2 performance levels", + ); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < iceland_ps->performance_level_count; i++) { + if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk) + iceland_ps->performance_levels[i].memory_clock = max_limits->mclk; + if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk) + iceland_ps->performance_levels[i].engine_clock = max_limits->sclk; + } + } + + iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk; + iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * 75) / 100; + + for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) { + if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (0 != hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (0 != 
hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + + disable_mclk_switching = (1 < info.display_count) || + disable_mclk_switching_for_frame_lock; + + sclk = iceland_ps->performance_levels[0].engine_clock; + mclk = iceland_ps->performance_levels[0].memory_clock; + + if (disable_mclk_switching) + mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock; + + iceland_ps->performance_levels[0].engine_clock = sclk; + iceland_ps->performance_levels[0].memory_clock = mclk; + + iceland_ps->performance_levels[1].engine_clock = + (iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ? + iceland_ps->performance_levels[1].engine_clock : + iceland_ps->performance_levels[0].engine_clock; + + if (disable_mclk_switching) { + if (mclk < iceland_ps->performance_levels[1].memory_clock) + mclk = iceland_ps->performance_levels[1].memory_clock; + + iceland_ps->performance_levels[0].memory_clock = mclk; + iceland_ps->performance_levels[1].memory_clock = mclk; + } else { + if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock) + iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + for (i=0; i < iceland_ps->performance_level_count; i++) { + iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk; + iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk; + iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; + iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max; + } + } + + return 0; +} + +static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + /* + * We return the status of Voltage Control instead of checking SCLK/MCLK DPM + * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, + * whereas voltage control is a fundemental change that will not be disabled + */ + return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0); +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. 
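+ * A zero return from iceland_is_dpm_running() means the SMC voltage
+ * controller is up; otherwise the assert below returns -1 instead of
+ * sending a force message the SMC may never acknowledge.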
*/ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", return -1;); + if (0 == data->sclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_DPM_ForceState, + n) ? 0 : 1); + + return 0; +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to Force MCLK when DPM is disabled", return -1;); + if (0 == data->mclk_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_ForceState, + n) ? 0 : 1); + + return 0; +} + +/** + * force DPM power State + * + * @param hwmgr: the address of the powerplay hardware manager. + * @param n : DPM level + * @return The response that came from the SMC. + */ +int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to Force PCIE level when DPM is disabled", return -1;); + if (0 == data->pcie_dpm_key_disabled) + return (0 == smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + n) ? 0 : 1); + + return 0; +} + +static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + uint32_t level, tmp; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* SCLK */ + if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force highest sclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, + SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level); + } + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* MCLK */ + if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)), + "force highest mclk dpm state failed!", return -1); + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level); + } + } + } + + if (0 == data->pcie_dpm_key_disabled) { + /* PCIE */ + if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) { + level = 0; + tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; + while (tmp >>= 1) + level++ ; + + if (0 != level) { + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)), + "force highest pcie dpm state failed!", return -1); + } + } + } + + return 0; +} + +static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr, + uint32_t level_mask) +{ + uint32_t level = 0; + + while (0 == (level_mask & (1 << level))) + level++; + + return level; +} + +static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + uint32_t level; + iceland_hwmgr *data = 
(iceland_hwmgr *)(hwmgr->backend); + + /* for now force only sclk */ + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + level = iceland_get_lowest_enable_level(hwmgr, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + + PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)), + "force sclk dpm state failed!", return -1); + + PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND, + TARGET_AND_CURRENT_PROFILE_INDEX, + CURR_SCLK_INDEX, + level); + } + + return 0; +} + +int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", + return -1); + + if (0 == data->sclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_NoForcedLevel)), + "unforce sclk dpm state failed!", + return -1); + } + + if (0 == data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_NoForcedLevel)), + "unforce mclk dpm state failed!", + return -1); + } + + if (0 == data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( + hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_UnForceLevel)), + "unforce pcie level failed!", + return -1); + } + + return 0; +} + +static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = iceland_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = iceland_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = iceland_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + return ret; +} + +const struct iceland_power_state *cast_const_phw_iceland_power_state( + const struct pp_hw_power_state *hw_ps) +{ + if (hw_ps == NULL) + return NULL; + + PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL); + + return (const struct iceland_power_state *)hw_ps; +} + +static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table); + uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; + struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table); + uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_smu7_dpm_table = 0; + + for (i = 0; i < psclk_table->count; i++) { + if (sclk == psclk_table->dpm_levels[i].value) + break; + } + + if (i >= psclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* + * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep + * divider update is required. 
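+ * min_clocks is still zero-initialised here, so the comparison below
+ * effectively raises DPMTABLE_UPDATE_SCLK whenever a nonzero deep-sleep
+ * minimum has already been recorded in display_timing.min_clock_insr.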
+ */ + if(data->display_timing.min_clock_insr != min_clocks.engineClockInSR) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < pmclk_table->count; i++) { + if (mclk == pmclk_table->dpm_levels[i].value) + break; + } + + if (i >= pmclk_table->count) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; + + return 0; +} + +static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps) +{ + uint32_t i; + uint32_t pcie_speed, max_speed = 0; + + for (i = 0; i < hw_ps->performance_level_count; i++) { + pcie_speed = hw_ps->performance_levels[i].pcie_gen; + if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + + return max_speed; +} + +static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr) +{ + uint32_t speed_cntl = 0; + + speed_cntl = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__PCIE, + ixPCIE_LC_SPEED_CNTL); + return((uint16_t)PHM_GET_FIELD(speed_cntl, + PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); +} + + +static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state); + const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state); + + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps); + uint16_t current_link_speed; + + if (data->force_pcie_gen == PP_PCIEGenInvalid) + current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps); + else + current_link_speed = data->force_pcie_gen; + + data->force_pcie_gen = PP_PCIEGenInvalid; + data->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch(target_link_speed) { + case PP_PCIEGen3: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false)) + break; + data->force_pcie_gen = PP_PCIEGen2; + if (current_link_speed == PP_PCIEGen2) + break; + case PP_PCIEGen2: + if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false)) + break; + default: + data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr); + break; + } + } else { + if (target_link_speed < current_link_speed) + data->pspp_notify_required = true; + } + + return 0; +} + +static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_FreezeLevel), + "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + DPMTABLE_OD_UPDATE_MCLK)) { + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); 
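+ /*
+ * Freeze the MCLK DPM level so the table upload that follows cannot
+ * race a level switch; the matching PPSMC_MSG_MCLKDPM_UnfreezeLevel is
+ * sent from iceland_unfreeze_sclk_mclk_dpm() once the new levels are
+ * in place.
+ */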
+ PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_FreezeLevel), + "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", + return -1); + } + + return 0; +} + +static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; + uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; + struct iceland_dpm_table *pdpm_table = &data->dpm_table; + + struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { + pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* + * Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on the default values + */ + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->sclk_table.count < 2 ? 0 : pdpm_table->sclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (sclk > pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value) { + clock_percent = ((sclk - pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value + + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value > sclk) { + clock_percent = ((pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value - sclk)*100) / + pgolden_dpm_table->sclk_table.dpm_levels[pgolden_dpm_table->sclk_table.count-1].value; + + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value - + (pgolden_dpm_table->sclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->sclk_table.dpm_levels[i].value = + pgolden_dpm_table->sclk_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { + pdpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + (pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value != 0), + "Divide by 0!", + return -1); + dpm_count = pdpm_table->mclk_table.count < 2? 
0 : pdpm_table->mclk_table.count-2; + for (i = dpm_count; i > 1; i--) { + if (mclk > pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value) { + clock_percent = ((mclk - pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value + + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + + } else if (pgolden_dpm_table->mclk_table.dpm_levels[pdpm_table->mclk_table.count-1].value > mclk) { + clock_percent = ((pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value - mclk)*100) / + pgolden_dpm_table->mclk_table.dpm_levels[pgolden_dpm_table->mclk_table.count-1].value; + + pdpm_table->mclk_table.dpm_levels[i].value = + pgolden_dpm_table->mclk_table.dpm_levels[i].value - + (pgolden_dpm_table->mclk_table.dpm_levels[i].value * clock_percent)/100; + } else + pdpm_table->mclk_table.dpm_levels[i].value = pgolden_dpm_table->mclk_table.dpm_levels[i].value; + } + } + } + + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { + result = iceland_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + /*populate MCLK dpm table to SMU7 */ + result = iceland_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } + + return result; +} + +static int iceland_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct iceland_single_dpm_table *pdpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < pdpm_table->count; i++) { + if ((pdpm_table->dpm_levels[i].value < low_limit) || + (pdpm_table->dpm_levels[i].value > high_limit)) + pdpm_table->dpm_levels[i].enabled = false; + else + pdpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int iceland_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_state) +{ + int result = 0; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((hw_state->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (1 == hw_state->performance_level_count) ? 
0: 1; + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.sclk_table), + hw_state->performance_levels[0].engine_clock, + hw_state->performance_levels[high_limit_count].engine_clock); + + iceland_trim_single_dpm_states(hwmgr, &(data->dpm_table.mclk_table), + hw_state->performance_levels[0].memory_clock, + hw_state->performance_levels[high_limit_count].memory_clock); + + return result; +} + +static int iceland_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) +{ + int result; + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + + result = iceland_trim_dpm_states(hwmgr, iceland_ps); + if (0 != result) + return result; + + data->dpm_level_enable_mask.sclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); + data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); + data->last_mclk_dpm_enable_mask = data->dpm_level_enable_mask.mclk_dpm_enable_mask; + if (data->uvd_enabled && (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)) + data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; + + data->dpm_level_enable_mask.pcie_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); + + return 0; +} + +static int iceland_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + return 0; +} + +int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = iceland_copy_bytes_to_smc( + hwmgr->smumgr, + data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + data->sram_end + ); + } + + return result; +} + +static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + uint32_t address; + int32_t result; + + if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) + return 0; + + + memset(&data->mc_reg_table, 0, sizeof(SMU71_Discrete_MCRegisters)); + + result = iceland_convert_mc_reg_table_to_smc(hwmgr, &(data->mc_reg_table)); + + if(result != 0) + return result; + + + address = data->mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]); + + return iceland_copy_bytes_to_smc(hwmgr->smumgr, address, + (uint8_t *)&data->mc_reg_table.data[0], + sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count, + data->sram_end); +} + +static int iceland_program_memory_timing_parameters_conditionally(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK)) + return iceland_program_memory_timing_parameters(hwmgr); 
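+ /*
+ * Note: the DPMTABLE_* values are defined as distinct bit flags, so
+ * adding them with "+" in the test above is equivalent to OR-ing the
+ * two masks together.
+ */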
+ + return 0; +} + +static int iceland_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->need_update_smu7_dpm_table) + return 0; + + if ((0 == data->sclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { + + PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_UnfreezeLevel), + "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + if ((0 == data->mclk_dpm_key_disabled) && + (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { + + PP_ASSERT_WITH_CODE( + 0 == iceland_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); + PP_ASSERT_WITH_CODE( + 0 == smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_UnfreezeLevel), + "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", + return -1); + } + + data->need_update_smu7_dpm_table = 0; + + return 0; +} + +static int iceland_notify_link_speed_change_after_state_change(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state); + uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_ps); + uint8_t request; + + if (data->pspp_notify_required || + data->pcie_performance_request) { + if (target_link_speed == PP_PCIEGen3) + request = PCIE_PERF_REQ_GEN3; + else if (target_link_speed == PP_PCIEGen2) + request = PCIE_PERF_REQ_GEN2; + else + request = PCIE_PERF_REQ_GEN1; + + if(request == PCIE_PERF_REQ_GEN1 && iceland_get_current_pcie_speed(hwmgr) > 0) { + data->pcie_performance_request = false; + return 0; + } + + if (0 != acpi_pcie_perf_request(hwmgr->device, request, false)) { + if (PP_PCIEGen2 == target_link_speed) + printk("PSPP request to switch to Gen2 from Gen3 Failed!"); + else + printk("PSPP request to switch to Gen1 from Gen2 Failed!"); + } + } + + data->pcie_performance_request = false; + return 0; +} + +int iceland_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) +{ + PPSMC_Result result; + iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend); + + if (0 == data->sclk_dpm_key_disabled) { + /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Sclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Sclk Dpm enable Mask failed", return -1); + } + } + + if (0 == data->mclk_dpm_key_disabled) { + /* Checking if DPM is running. 
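+ * Unlike the force-state helpers above, a failure here only logs an
+ * error and the enable mask is still sent.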
If we discover hang because of this, we should skip this message.*/ + if (0 != iceland_is_dpm_running(hwmgr)) + printk(KERN_ERR "[ powerplay ] Trying to set Enable Mclk Mask when DPM is disabled \n"); + + if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { + result = smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + (PPSMC_Msg)PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask); + PP_ASSERT_WITH_CODE((0 == result), + "Set Mclk Dpm enable Mask failed", return -1); + } + } + + return 0; +} + +static int iceland_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) +{ + int tmp_result, result = 0; + + tmp_result = iceland_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to find DPM states clocks in DPM table!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_request_link_speed_change_before_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to request link speed change before state change!", result = tmp_result); + } + + tmp_result = iceland_freeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to freeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + + tmp_result = iceland_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to generate DPM level enabled mask!", result = tmp_result); + + tmp_result = iceland_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update VCE DPM!", result = tmp_result); + + tmp_result = iceland_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", result = tmp_result); + + tmp_result = iceland_update_and_upload_mc_reg_table(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload MC reg table!", result = tmp_result); + + tmp_result = iceland_program_memory_timing_parameters_conditionally(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to program memory timing parameters!", result = tmp_result); + + tmp_result = iceland_unfreeze_sclk_mclk_dpm(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to unfreeze SCLK MCLK DPM!", result = tmp_result); + + tmp_result = iceland_upload_dpm_level_enable_mask(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to upload DPM level enabled mask!", result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest)) { + tmp_result = iceland_notify_link_speed_change_after_state_change(hwmgr, input); + PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to notify link speed change after state change!", result = tmp_result); + } + + return result; +} + +static int iceland_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct iceland_power_state); +} + +static int iceland_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].memory_clock; + else + return 
iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock; +} + +static int iceland_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + if (low) + return iceland_ps->performance_levels[0].engine_clock; + else + return iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock; +} + +static int iceland_get_current_pcie_lane_number( + struct pp_hwmgr *hwmgr) +{ + uint32_t link_width; + + link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__PCIE, + PCIE_LC_LINK_WIDTH_CNTL, + LC_LINK_WIDTH_RD); + + PP_ASSERT_WITH_CODE((7 >= link_width), + "Invalid PCIe lane width!", return 0); + + return decode_pcie_lane_width(link_width); +} + +static int iceland_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *ps = (struct iceland_power_state *)hw_ps; + ATOM_FIRMWARE_INFO_V2_2 *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + /* First retrieve the Boot clocks and VDDC from the firmware info table. + * We assume here that fw_info is unchanged if this call fails. + */ + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( + hwmgr->device, index, + &size, &frev, &crev); + if (!fw_info) + /* During a test, there is no firmware info table. */ + return 0; + + /* Patch the state. */ + data->vbios_boot_state.sclk_bootup_value = le32_to_cpu(fw_info->ulDefaultEngineClock); + data->vbios_boot_state.mclk_bootup_value = le32_to_cpu(fw_info->ulDefaultMemoryClock); + data->vbios_boot_state.mvdd_bootup_value = le16_to_cpu(fw_info->usBootUpMVDDCVoltage); + data->vbios_boot_state.vddc_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCVoltage); + data->vbios_boot_state.vddci_bootup_value = le16_to_cpu(fw_info->usBootUpVDDCIVoltage); + data->vbios_boot_state.pcie_gen_bootup_value = iceland_get_current_pcie_speed(hwmgr); + data->vbios_boot_state.pcie_lane_bootup_value = + (uint16_t)iceland_get_current_pcie_lane_number(hwmgr); + + /* set boot power state */ + ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; + ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; + ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; + ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; + + return 0; +} + +static int iceland_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *power_state, + unsigned int index, const void *clock_info) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_power_state *iceland_power_state = cast_phw_iceland_power_state(power_state); + const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; + struct iceland_performance_level *performance_level; + uint32_t engine_clock, memory_clock; + uint16_t pcie_gen_from_bios; + + engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; + memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; + + if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) + 
data->highest_mclk = memory_clock; + + performance_level = &(iceland_power_state->performance_levels + [iceland_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count < SMU71_MAX_LEVELS_GRAPHICS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (iceland_power_state->performance_level_count <= + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->memory_clock = memory_clock; + performance_level->engine_clock = engine_clock; + + pcie_gen_from_bios = visland_clk_info->ucPCIEGen; + + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); + performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); + + return 0; +} + +static int iceland_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct iceland_power_state *ps; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct phm_clock_voltage_dependency_table *dep_mclk_table = + hwmgr->dyn_state.vddci_dependency_on_mclk; + + memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); + + state->hardware.magic = PHM_VIslands_Magic; + + ps = (struct iceland_power_state *)(&state->hardware); + + result = pp_tables_get_entry(hwmgr, entry_index, state, + iceland_get_pp_table_entry_callback_func); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state as + * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot + * state if there is only one VDDCI/MCLK level, check if it's + * the same as VBIOS boot state + */ + if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { + if (dep_mclk_table->entries[0].clk != + data->vbios_boot_state.mclk_bootup_value) + printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table " + "does not match VBIOS boot MCLK level"); + if (dep_mclk_table->entries[0].v != + data->vbios_boot_state.vddci_bootup_value) + printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table " + "does not match VBIOS boot VDDCI level"); + } + + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + if (state->classification.flags & PP_StateClassificationFlag_ACPI) + data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; + else if (0 != (state->classification.flags & PP_StateClassificationFlag_Boot)) { + if (data->bacos.best_match == 0xffff) { + /* For C.I. 
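+ * (only when no better BACO match was recorded, i.e. best_match is
+ * still 0xffff)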
use boot state as base BACO state */ + data->bacos.best_match = PP_StateClassificationFlag_Boot; + data->bacos.performance_level = ps->performance_levels[0]; + } + } + + + ps->uvd_clocks.VCLK = state->uvd_clocks.VCLK; + ps->uvd_clocks.DCLK = state->uvd_clocks.DCLK; + + if (!result) { + uint32_t i; + + switch (state->classification.ui_label) { + case PP_StateUILabel_Performance: + data->use_pcie_performance_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_performance.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_performance.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_performance.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_performance.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_performance.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_performance.min = + ps->performance_levels[i].pcie_lane; + } + break; + case PP_StateUILabel_Battery: + data->use_pcie_power_saving_levels = true; + + for (i = 0; i < ps->performance_level_count; i++) { + if (data->pcie_gen_power_saving.max < + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.max = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_gen_power_saving.min > + ps->performance_levels[i].pcie_gen) + data->pcie_gen_power_saving.min = + ps->performance_levels[i].pcie_gen; + + if (data->pcie_lane_power_saving.max < + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.max = + ps->performance_levels[i].pcie_lane; + + if (data->pcie_lane_power_saving.min > + ps->performance_levels[i].pcie_lane) + data->pcie_lane_power_saving.min = + ps->performance_levels[i].pcie_lane; + } + break; + default: + break; + } + } + return 0; +} + +static void +iceland_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m) +{ + uint32_t sclk, mclk, activity_percent; + uint32_t offset; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetSclkFrequency)); + + sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)(PPSMC_MSG_API_GetMclkFrequency)); + + mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100); + + offset = data->soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity); + activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); + activity_percent += 0x80; + activity_percent >>= 8; + + seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent); + + seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en"); + + seq_printf(m, "vce %sabled\n", data->vce_power_gated ? 
"dis" : "en"); +} + +int iceland_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + struct cgs_display_info info = {0}; + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_displays = info.display_count; + + if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ + iceland_notify_smc_display_change(hwmgr, false); + else + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +/** +* Programs the display gap +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always OK +*/ +int iceland_program_display_gap(struct pp_hwmgr *hwmgr) +{ + uint32_t num_active_displays = 0; + uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); + uint32_t display_gap2; + uint32_t pre_vbi_time_in_us; + uint32_t frame_time_in_us; + uint32_t ref_clock; + uint32_t refresh_rate = 0; + struct cgs_display_info info = {0}; + struct cgs_mode_info mode_info; + + info.mode_info = &mode_info; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_active_displays = info.display_count; + + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0)? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); + + ref_clock = mode_info.ref_clock; + refresh_rate = mode_info.refresh_rate; + + if(0 == refresh_rate) + refresh_rate = 60; + + frame_time_in_us = 1000000 / refresh_rate; + + pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_4, PreVBlankGap, 0x64); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_5, VBlankTimeout, (frame_time_in_us - pre_vbi_time_in_us)); + + if (num_active_displays == 1) + iceland_notify_smc_display_change(hwmgr, true); + + return 0; +} + +int iceland_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + iceland_program_display_gap(hwmgr); + + return 0; +} + +/** +* Set maximum target operating fan output PWM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanPwm: max operating fan PWM in percents +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm) ? 0 : -1); +} + +/** +* Set maximum target operating fan output RPM +* +* @param pHwMgr: the address of the powerplay hardware manager. +* @param usMaxFanRpm: max operating fan RPM value. +* @return The response that came from the SMC. +*/ +static int iceland_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) +{ + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = us_max_fan_pwm; + + if (phm_is_hw_access_blocked(hwmgr)) + return 0; + + return (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanRpmMax, us_max_fan_pwm) ? 
0 : -1); +} + +static int iceland_dpm_set_interrupt_state(void *private_data, + unsigned src_id, unsigned type, + int enabled) +{ + uint32_t cg_thermal_int; + struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr; + + if (hwmgr == NULL) + return -EINVAL; + + switch (type) { + case AMD_THERMAL_IRQ_LOW_TO_HIGH: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + + case AMD_THERMAL_IRQ_HIGH_TO_LOW: + if (enabled) { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } else { + cg_thermal_int = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int); + } + break; + default: + break; + } + return 0; +} + +static int iceland_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, + const void *thermal_interrupt_info) +{ + int result; + const struct pp_interrupt_registration_info *info = + (const struct pp_interrupt_registration_info *)thermal_interrupt_info; + + if (info == NULL) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST, + iceland_dpm_set_interrupt_state, + info->call_back, info->context); + + if (result) + return -EINVAL; + + return 0; +} + + +static bool iceland_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0,0,NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; +/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL + if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cgs_get_min_clock_settings(hwmgr->device, &min_clocks); + if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) + is_update_required = true; +*/ + return is_update_required; +} + + +static inline bool iceland_are_power_levels_equal(const struct iceland_performance_level *pl1, + const struct iceland_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int iceland_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct iceland_power_state *psa = cast_const_phw_iceland_power_state(pstate1); + const struct iceland_power_state *psb = 
cast_const_phw_iceland_power_state(pstate2); + int i; + + if (equal == NULL || psa == NULL || psb == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!iceland_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clocks.VCLK == psb->uvd_clocks.VCLK) && (psa->uvd_clocks.DCLK == psb->uvd_clocks.DCLK)); + *equal &= ((psa->vce_clocks.EVCLK == psb->vce_clocks.EVCLK) && (psa->vce_clocks.ECCLK == psb->vce_clocks.ECCLK)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + *equal &= (psa->acp_clk == psb->acp_clk); + + return 0; +} + +static int iceland_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + iceland_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int iceland_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_FDO_CTRL2, FDO_PWM_MODE); +} + +static int iceland_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!data->sclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + case PP_MCLK: + if (!data->mclk_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + case PP_PCIE: + { + uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!data->pcie_dpm_key_disabled) + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int iceland_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + 
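/*
 * In the PP_PCIE branch of iceland_force_clock_level() above, the
 * while (tmp >>= 1) loop just finds the index of the highest set bit of
 * the masked enable mask, so the SMC is forced to the top requested PCIe
 * DPM level.  Hypothetical example: mask = 0x6 with
 * pcie_dpm_enable_mask = 0x7 gives tmp = 0x6 and level = 2.
 */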
continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency); + clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + pcie_speed = iceland_get_current_pcie_speed(hwmgr); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + return size; +} + +static int iceland_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int iceland_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int iceland_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr) +{ + uint32_t reference_clock; + uint32_t tc; + uint32_t divide; + + ATOM_FIRMWARE_INFO *fw_info; + uint16_t size; + uint8_t frev, crev; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + + tc = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK); + + if (tc) + return TCLK; + + fw_info = (ATOM_FIRMWARE_INFO *)cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + if 
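/*
 * The sclk/mclk overdrive helpers in this file operate on the last
 * (highest) DPM level relative to the golden table, and requests above 20
 * are clamped to 20 percent.  With hypothetical values, a golden top
 * engine clock of 80000 (10 kHz units, i.e. 800 MHz) and an OD value of
 * 10 gives 80000 * 10 / 100 + 80000 = 88000 (880 MHz); reading it back,
 * (88000 - 80000) * 100 / 80000 = 10.
 */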
(!fw_info) + return 0; + + reference_clock = le16_to_cpu(fw_info->usReferenceClock); + + divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE); + + if (0 != divide) + return reference_clock / 4; + + return reference_clock; +} + +static int iceland_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct iceland_power_state *iceland_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + iceland_ps = cast_phw_iceland_power_state(&ps->hardware); + + iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + +static const struct pp_hwmgr_func iceland_hwmgr_funcs = { + .backend_init = &iceland_hwmgr_backend_init, + .backend_fini = &iceland_hwmgr_backend_fini, + .asic_setup = &iceland_setup_asic_task, + .dynamic_state_management_enable = &iceland_enable_dpm_tasks, + .apply_state_adjust_rules = iceland_apply_state_adjust_rules, + .force_dpm_level = &iceland_force_dpm_level, + .power_state_set = iceland_set_power_state_tasks, + .get_power_state_size = iceland_get_power_state_size, + .get_mclk = iceland_dpm_get_mclk, + .get_sclk = iceland_dpm_get_sclk, + .patch_boot_state = iceland_dpm_patch_boot_state, + .get_pp_table_entry = iceland_get_pp_table_entry, + .get_num_of_pp_table_entries = iceland_get_num_of_entries, + .print_current_perforce_level = iceland_print_current_perforce_level, + .powerdown_uvd = iceland_phm_powerdown_uvd, + .powergate_uvd = iceland_phm_powergate_uvd, + .powergate_vce = iceland_phm_powergate_vce, + .disable_clock_power_gating = iceland_phm_disable_clock_power_gating, + .update_clock_gatings = iceland_phm_update_clock_gatings, + .notify_smc_display_config_after_ps_adjustment = iceland_notify_smc_display_config_after_ps_adjustment, + .display_config_changed = iceland_display_configuration_changed_task, + .set_max_fan_pwm_output = iceland_set_max_fan_pwm_output, + .set_max_fan_rpm_output = iceland_set_max_fan_rpm_output, + .get_temperature = iceland_thermal_get_temperature, + .stop_thermal_controller = iceland_thermal_stop_thermal_controller, + .get_fan_speed_info = iceland_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = iceland_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = iceland_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = iceland_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = iceland_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = iceland_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = iceland_thermal_ctrl_uninitialize_thermal_controller, + .register_internal_thermal_interrupt = iceland_register_internal_thermal_interrupt, + .check_smc_update_required_for_display_configuration = iceland_check_smc_update_required_for_display_configuration, + .check_states_equal = iceland_check_states_equal, + .set_fan_control_mode = iceland_set_fan_control_mode, + .get_fan_control_mode = iceland_get_fan_control_mode, + .force_clock_level = iceland_force_clock_level, + .print_clock_levels = iceland_print_clock_levels, + .get_sclk_od = iceland_get_sclk_od, + .set_sclk_od = iceland_set_sclk_od, + .get_mclk_od = 
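/*
 * This table is the per-ASIC dispatch surface: the generic powerplay core
 * does not call the iceland_* helpers directly but goes through these
 * function pointers, roughly like the hypothetical sketch below, and
 * iceland_hwmgr_init() below points hwmgr->hwmgr_func at this table.
 *
 *     if (hwmgr->hwmgr_func->get_sclk_od)
 *             od = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 */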
iceland_get_mclk_od, + .set_mclk_od = iceland_set_mclk_od, +}; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + iceland_hwmgr *data; + + data = kzalloc (sizeof(iceland_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + memset(data, 0x00, sizeof(iceland_hwmgr)); + + hwmgr->backend = data; + hwmgr->hwmgr_func = &iceland_hwmgr_funcs; + hwmgr->pptable_func = &pptable_funcs; + + /* thermal */ + pp_iceland_thermal_initialize(hwmgr); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h new file mode 100644 index 000000000000..f253988de2d2 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h @@ -0,0 +1,424 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#ifndef ICELAND_HWMGR_H +#define ICELAND_HWMGR_H + +#include "hwmgr.h" +#include "ppatomctrl.h" +#include "ppinterrupt.h" +#include "ppsmc.h" +#include "iceland_powertune.h" +#include "pp_endian.h" +#include "smu71_discrete.h" + +#define ICELAND_MAX_HARDWARE_POWERLEVELS 2 +#define ICELAND_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15 + +struct iceland_performance_level { + uint32_t memory_clock; + uint32_t engine_clock; + uint16_t pcie_gen; + uint16_t pcie_lane; +}; + +struct _phw_iceland_bacos { + uint32_t best_match; + uint32_t baco_flags; + struct iceland_performance_level performance_level; +}; +typedef struct _phw_iceland_bacos phw_iceland_bacos; + +struct _phw_iceland_uvd_clocks { + uint32_t VCLK; + uint32_t DCLK; +}; + +typedef struct _phw_iceland_uvd_clocks phw_iceland_uvd_clocks; + +struct _phw_iceland_vce_clocks { + uint32_t EVCLK; + uint32_t ECCLK; +}; + +typedef struct _phw_iceland_vce_clocks phw_iceland_vce_clocks; + +struct iceland_power_state { + uint32_t magic; + phw_iceland_uvd_clocks uvd_clocks; + phw_iceland_vce_clocks vce_clocks; + uint32_t sam_clk; + uint32_t acp_clk; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct iceland_performance_level performance_levels[ICELAND_MAX_HARDWARE_POWERLEVELS]; +}; + +struct _phw_iceland_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; +typedef struct _phw_iceland_dpm_level phw_iceland_dpm_level; + +#define ICELAND_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define ICELAND_MINIMUM_ENGINE_CLOCK 5000 + +struct iceland_single_dpm_table { + uint32_t count; + phw_iceland_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct iceland_dpm_table { + struct iceland_single_dpm_table sclk_table; + struct iceland_single_dpm_table mclk_table; + struct iceland_single_dpm_table pcie_speed_table; + struct iceland_single_dpm_table vddc_table; + struct iceland_single_dpm_table vdd_gfx_table; + struct iceland_single_dpm_table vdd_ci_table; + struct iceland_single_dpm_table mvdd_table; +}; +typedef struct _phw_iceland_dpm_table phw_iceland_dpm_table; + + +struct _phw_iceland_clock_regisiters { + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t vDLL_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_SS1; + uint32_t vMPLL_SS2; +}; +typedef struct _phw_iceland_clock_regisiters phw_iceland_clock_registers; + +struct _phw_iceland_voltage_smio_registers { + uint32_t vs0_vid_lower_smio_cntl; +}; +typedef struct _phw_iceland_voltage_smio_registers phw_iceland_voltage_smio_registers; + + +struct _phw_iceland_mc_reg_entry { + uint32_t mclk_max; + uint32_t mc_data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_entry phw_iceland_mc_reg_entry; + +struct _phw_iceland_mc_reg_table { + uint8_t last; /* number of registers*/ + uint8_t num_entries; /* number of entries in mc_reg_table_entry used*/ + uint16_t validflag; /* indicate the corresponding register is valid or not. 1: valid, 0: invalid. 
bit0->address[0], bit1->address[1], etc.*/ + phw_iceland_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMU71_Discrete_MCRegisterAddress mc_reg_address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; +typedef struct _phw_iceland_mc_reg_table phw_iceland_mc_reg_table; + +#define DISABLE_MC_LOADMICROCODE 1 +#define DISABLE_MC_CFGPROGRAMMING 2 + + +/*Ultra Low Voltage parameter structure */ +struct phw_iceland_ulv_parm{ + bool ulv_supported; + uint32_t ch_ulv_parameter; + uint32_t ulv_volt_change_delay; + struct iceland_performance_level ulv_power_level; +}; + +#define ICELAND_MAX_LEAKAGE_COUNT 8 + +struct phw_iceland_leakage_voltage { + uint16_t count; + uint16_t leakage_id[ICELAND_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[ICELAND_MAX_LEAKAGE_COUNT]; +}; + +struct _phw_iceland_display_timing { + uint32_t min_clock_insr; + uint32_t num_existing_displays; +}; +typedef struct _phw_iceland_display_timing phw_iceland_display_timing; + + +struct phw_iceland_thermal_temperature_setting +{ + long temperature_low; + long temperature_high; + long temperature_shutdown; +}; + +struct _phw_iceland_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; + uint32_t pcie_dpm_enable_mask; +}; +typedef struct _phw_iceland_dpmlevel_enable_mask phw_iceland_dpmlevel_enable_mask; + +struct _phw_iceland_pcie_perf_range { + uint16_t max; + uint16_t min; +}; +typedef struct _phw_iceland_pcie_perf_range phw_iceland_pcie_perf_range; + +struct _phw_iceland_vbios_boot_state { + uint16_t mvdd_bootup_value; + uint16_t vddc_bootup_value; + uint16_t vddci_bootup_value; + uint16_t vddgfx_bootup_value; + uint32_t sclk_bootup_value; + uint32_t mclk_bootup_value; + uint16_t pcie_gen_bootup_value; + uint16_t pcie_lane_bootup_value; +}; +typedef struct _phw_iceland_vbios_boot_state phw_iceland_vbios_boot_state; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 + +/* We need to review which fields are needed. */ +/* This is mostly a copy of the RV7xx/Evergreen structure which is close, but not identical to the N.Islands one. 
*/ +struct iceland_hwmgr { + struct iceland_dpm_table dpm_table; + struct iceland_dpm_table golden_dpm_table; + + uint32_t voting_rights_clients0; + uint32_t voting_rights_clients1; + uint32_t voting_rights_clients2; + uint32_t voting_rights_clients3; + uint32_t voting_rights_clients4; + uint32_t voting_rights_clients5; + uint32_t voting_rights_clients6; + uint32_t voting_rights_clients7; + uint32_t static_screen_threshold_unit; + uint32_t static_screen_threshold; + uint32_t voltage_control; + uint32_t vdd_gfx_control; + + uint32_t vddc_vddci_delta; + uint32_t vddc_vddgfx_delta; + + struct pp_interrupt_registration_info internal_high_thermal_interrupt_info; + struct pp_interrupt_registration_info internal_low_thermal_interrupt_info; + struct pp_interrupt_registration_info smc_to_host_interrupt_info; + uint32_t active_auto_throttle_sources; + + struct pp_interrupt_registration_info external_throttle_interrupt; + irq_handler_func_t external_throttle_callback; + void *external_throttle_context; + + struct pp_interrupt_registration_info ctf_interrupt_info; + irq_handler_func_t ctf_callback; + void *ctf_context; + + phw_iceland_clock_registers clock_registers; + phw_iceland_voltage_smio_registers voltage_smio_registers; + + bool is_memory_GDDR5; + uint16_t acpi_vddc; + bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */ + uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */ + uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */ + uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */ + uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */ + uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */ + struct phw_iceland_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/ + struct phw_iceland_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */ + struct phw_iceland_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */ + + uint32_t mvdd_control; + uint32_t vddc_mask_low; + uint32_t mvdd_mask_low; + uint16_t max_vddc_in_pp_table; /* the maximum VDDC value in the powerplay table*/ + uint16_t min_vddc_in_pp_table; + uint16_t max_vddci_in_pp_table; /* the maximum VDDCI value in the powerplay table */ + uint16_t min_vddci_in_pp_table; + uint32_t mclk_strobe_mode_threshold; + uint32_t mclk_stutter_mode_threshold; + uint32_t mclk_edc_enable_threshold; + uint32_t mclk_edc_wr_enable_threshold; + bool is_uvd_enabled; + bool is_xdma_enabled; + phw_iceland_vbios_boot_state vbios_boot_state; + + bool battery_state; + bool is_tlu_enabled; + bool pcie_performance_request; + + /* -------------- SMC SRAM Address of firmware header tables ----------------*/ + uint32_t sram_end; /* The first address after the SMC SRAM. */ + uint32_t dpm_table_start; /* The start of the dpm table in the SMC SRAM. */ + uint32_t soft_regs_start; /* The start of the soft registers in the SMC SRAM. */ + uint32_t mc_reg_table_start; /* The start of the mc register table in the SMC SRAM. */ + uint32_t fan_table_start; /* The start of the fan table in the SMC SRAM. */ + uint32_t arb_table_start; /* The start of the ARB setting table in the SMC SRAM. */ + uint32_t ulv_settings_start; + SMU71_Discrete_DpmTable smc_state_table; /* The carbon copy of the SMC state table. */ + SMU71_Discrete_MCRegisters mc_reg_table; + SMU71_Discrete_Ulv ulv_setting; /* The carbon copy of ULV setting. 
*/ + + /* -------------- Stuff originally coming from Evergreen --------------------*/ + phw_iceland_mc_reg_table iceland_mc_reg_table; + uint32_t vdd_ci_control; + pp_atomctrl_voltage_table vddc_voltage_table; + pp_atomctrl_voltage_table vddci_voltage_table; + pp_atomctrl_voltage_table vddgfx_voltage_table; + pp_atomctrl_voltage_table mvdd_voltage_table; + + uint32_t mgcg_cgtt_local2; + uint32_t mgcg_cgtt_local3; + uint32_t gpio_debug; + uint32_t mc_micro_code_feature; + uint32_t highest_mclk; + uint16_t acpi_vdd_ci; + uint8_t mvdd_high_index; + uint8_t mvdd_low_index; + bool dll_defaule_on; + bool performance_request_registered; + + /* ----------------- Low Power Features ---------------------*/ + phw_iceland_bacos bacos; + struct phw_iceland_ulv_parm ulv; + + /* ----------------- CAC Stuff ---------------------*/ + uint32_t cac_table_start; + bool cac_configuration_required; /* TRUE if PP_CACConfigurationRequired == 1 */ + bool driver_calculate_cac_leakage; /* TRUE if PP_DriverCalculateCACLeakage == 1 */ + bool cac_enabled; + + /* ----------------- DPM2 Parameters ---------------------*/ + uint32_t power_containment_features; + bool enable_bapm_feature; + bool enable_dte_feature; + bool enable_tdc_limit_feature; + bool enable_pkg_pwr_tracking_feature; + bool disable_uvd_power_tune_feature; + struct iceland_pt_defaults *power_tune_defaults; + SMU71_Discrete_PmFuses power_tune_table; + uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + /* ----------------- Phase Shedding ---------------------*/ + bool vddc_phase_shed_control; + + /* --------------------- DI/DT --------------------------*/ + phw_iceland_display_timing display_timing; + + /* --------- ReadRegistry data for memory and engine clock margins ---- */ + uint32_t engine_clock_data; + uint32_t memory_clock_data; + + /* -------- Thermal Temperature Setting --------------*/ + struct phw_iceland_thermal_temperature_setting thermal_temp_setting; + phw_iceland_dpmlevel_enable_mask dpm_level_enable_mask; + + uint32_t need_update_smu7_dpm_table; + uint32_t sclk_dpm_key_disabled; + uint32_t mclk_dpm_key_disabled; + uint32_t pcie_dpm_key_disabled; + /* used to store the previous dal min sclock */ + uint32_t min_engine_clocks; + phw_iceland_pcie_perf_range pcie_gen_performance; + phw_iceland_pcie_perf_range pcie_lane_performance; + phw_iceland_pcie_perf_range pcie_gen_power_saving; + phw_iceland_pcie_perf_range pcie_lane_power_saving; + bool use_pcie_performance_levels; + bool use_pcie_power_saving_levels; + /* percentage value from 0-100, default 50 */ + uint32_t activity_target[SMU71_MAX_LEVELS_GRAPHICS]; + uint32_t mclk_activity_target; + uint32_t low_sclk_interrupt_threshold; + uint32_t last_mclk_dpm_enable_mask; + bool uvd_enabled; + uint32_t pcc_monitor_enabled; + + /* --------- Power Gating States ------------*/ + bool uvd_power_gated; /* 1: gated, 0:not gated */ + bool vce_power_gated; /* 1: gated, 0:not gated */ + bool samu_power_gated; /* 1: gated, 0:not gated */ + bool acp_power_gated; /* 1: gated, 0:not gated */ + bool pg_acp_init; + + /* soft pptable for re-uploading into smu */ + void *soft_pp_table; +}; + +typedef struct iceland_hwmgr iceland_hwmgr; + +int iceland_hwmgr_init(struct pp_hwmgr *hwmgr); +int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +uint32_t iceland_get_xclk(struct pp_hwmgr *hwmgr); +int 
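/*
 * The *_start members cached in struct iceland_hwmgr above are offsets
 * into SMC SRAM; callers combine them with offsetof() on the SMU71
 * firmware structures to address individual fields, for example
 * soft_regs_start + offsetof(SMU71_SoftRegisters, AverageGraphicsActivity)
 * is read through the indirect SMC register interface when printing the
 * current performance level.
 */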
iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr); +int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr); + +#define ICELAND_DPM2_NEAR_TDP_DEC 10 +#define ICELAND_DPM2_ABOVE_SAFE_INC 5 +#define ICELAND_DPM2_BELOW_SAFE_INC 20 + +/* + * Log2 of the LTA window size (l2numWin_TDP). Eg. If LTA windows size + * is 128, then this value should be Log2(128) = 7. + */ +#define ICELAND_DPM2_LTA_WINDOW_SIZE 7 + +#define ICELAND_DPM2_LTS_TRUNCATE 0 + +#define ICELAND_DPM2_TDP_SAFE_LIMIT_PERCENT 80 // Maximum 100 + +#define ICELAND_DPM2_MAXPS_PERCENT_H 90 // Maximum 0xFF +#define ICELAND_DPM2_MAXPS_PERCENT_M 90 // Maximum 0xFF + +#define ICELAND_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define ICELAND_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define ICELAND_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define ICELAND_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define ICELAND_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define ICELAND_VOLTAGE_CONTROL_NONE 0x0 +#define ICELAND_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define ICELAND_VOLTAGE_CONTROL_BY_SVID2 0x2 + +/* convert to Q8.8 format for firmware */ +#define ICELAND_Q88_FORMAT_CONVERSION_UNIT 256 + +#define ICELAND_UNUSED_GPIO_PIN 0x7F + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c new file mode 100644 index 000000000000..041e9648e592 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c @@ -0,0 +1,490 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#include "amdgpu.h" +#include "hwmgr.h" +#include "smumgr.h" +#include "iceland_hwmgr.h" +#include "iceland_powertune.h" +#include "iceland_smumgr.h" +#include "smu71_discrete.h" +#include "smu71.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "pp_endian.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +#define DEVICE_ID_VI_ICELAND_M_6900 0x6900 +#define DEVICE_ID_VI_ICELAND_M_6901 0x6901 +#define DEVICE_ID_VI_ICELAND_M_6902 0x6902 +#define DEVICE_ID_VI_ICELAND_M_6903 0x6903 + + +struct iceland_pt_defaults defaults_iceland = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, + * TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } +}; + +/* 35W - XT, XTL */ +struct iceland_pt_defaults defaults_icelandxt = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xA7, 0x0, 0x0, 0xB5, 0x0, 0x0, 0x9F, 0x0, 0x0, 0xD6, 0x0, 0x0, 0xD7, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +/* 25W - PRO, LE */ +struct iceland_pt_defaults defaults_icelandpro = +{ + /* + * sviLoadLIneEn, SviLoadLineVddC, + * TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, + * BAPM_TEMP_GRADIENT + */ + 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x0, + { 0xB7, 0x0, 0x0, 0xC3, 0x0, 0x0, 0xB5, 0x0, 0x0, 0xEA, 0x0, 0x0, 0xE6, 0x0, 0x0}, + { 0x1EA, 0x0, 0x0, 0x224, 0x0, 0x0, 0x25E, 0x0, 0x0, 0x28E, 0x0, 0x0, 0x2AB, 0x0, 0x0} +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t tmp = 0; + struct cgs_system_info sys_info = {0}; + uint32_t pdev_id; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV; + cgs_query_system_info(hwmgr->device, &sys_info); + pdev_id = (uint32_t)sys_info.value; + + switch (pdev_id) { + case DEVICE_ID_VI_ICELAND_M_6900: + case DEVICE_ID_VI_ICELAND_M_6903: + data->power_tune_defaults = &defaults_icelandxt; + break; + + case DEVICE_ID_VI_ICELAND_M_6901: + case DEVICE_ID_VI_ICELAND_M_6902: + data->power_tune_defaults = &defaults_icelandpro; + break; + default: + /* TODO: need to assign valid defaults */ + data->power_tune_defaults = &defaults_iceland; + pr_warning("Unknown V.I. 
Device ID.\n"); + break; + } + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + data->ul_dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + data->fast_watermark_threshold = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + tmp = 1; + data->enable_dte_feature = tmp ? false : true; + data->enable_tdc_limit_feature = tmp ? true : false; + data->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } + } +} + +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + struct iceland_pt_defaults *defaults = data->power_tune_defaults; + SMU71_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table; + struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table; + uint16_t *def1, *def2; + int i, j, k; + + /* + * TDP number of fraction bits are changed from 8 to 7 for Iceland + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + dpm_table->DTETjOffset = (uint8_t)data->ul_dte_tj_offset; + + dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + /* The following are for new Iceland Multi-input fan/thermal control */ + if(NULL != ppm) { + dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000; + dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256; + } else { + dpm_table->PPM_PkgPwrLimit = 0; + dpm_table->PPM_TemperatureLimit = 0; + } + + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit); + CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit); + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + def1 = defaults->bapmti_r; + def2 = defaults->bapmti_rc; + + for (i = 0; i < SMU71_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU71_DTE_SOURCES; j++) { + for (k = 0; k < SMU71_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2); + def1++; + def2++; + } + } + } + + return 0; +} + +static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + 
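/*
 * In iceland_populate_bapm_parameters_in_dpm_table() above, the TDP
 * values are scaled by 256 before PP_HOST_TO_SMC_US() converts them to
 * SMC byte order, i.e. they go out as fixed-point numbers; a hypothetical
 * usTDP of 75 becomes 75 * 256 = 19200.  The flat bapmti_r/bapmti_rc
 * default arrays are unrolled in [iteration][source][sink] order into the
 * 3-D BAPMTI_R/BAPMTI_RC tables, one entry per loop step.
 */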
struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Iceland as requested by SMC team + */ + tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + const struct iceland_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 8; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + + HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(HiSidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(LoSidd); + + return 0; +} + +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (iceland_read_smc_sram_dword(hwmgr->smumgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW0 - DW3 */ + if (iceland_populate_bapm_vddc_vid_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate bapm vddc vid Failed!", + return -EINVAL); + + /* DW4 - DW5 */ + if (iceland_populate_vddc_vid(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate vddc vid Failed!", + return -EINVAL); + + /* DW6 */ + if (iceland_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (iceland_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 
*/ + if (iceland_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + /* DW9-DW12 */ + if (0 != iceland_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW16 */ + if (iceland_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW17 */ + if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW18 */ + if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (iceland_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU71_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (0 == smc_result) ? true : false; + } + return result; +} + +static int iceland_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + + if(data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int iceland_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_DpmTable *dpm_table = &data->smc_state_table; + int smc_result; + int result = 0; + uint32_t is_asic_kicker; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + is_asic_kicker = cgs_read_register(hwmgr->device, mmCC_BIF_BX_STRAP2); + is_asic_kicker = (is_asic_kicker >> 12) & 0x01; + + if (data->enable_bapm_feature && + (!is_asic_kicker || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc))) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable BAPM in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; + } + + if (is_asic_kicker && !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc)) + dpm_table->DTEMode = 2; + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((0 == 
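/*
 * iceland_populate_pm_fuses() above fills the SMU71_Discrete_PmFuses
 * image one field group at a time (the DW0..DW18 comments track the dword
 * layout) and only then uploads the whole structure with a single
 * iceland_copy_bytes_to_smc() call to the offset read from the firmware
 * header's PmFuseTable entry, so a failure in any helper aborts the
 * upload.
 */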
smc_result), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (0 == smc_result) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((0 == smc_result), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (0 == smc_result) { + struct phm_cac_tdp_table *cac_table = + hwmgr->dyn_state.cac_dtp_table; + uint32_t default_limit = + (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (iceland_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* + * SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = iceland_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h new file mode 100644 index 000000000000..6c25ee139ca3 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h @@ -0,0 +1,74 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#ifndef ICELAND_POWERTUNE_H +#define ICELAND_POWERTUNE_H + +#include "smu71.h" + +enum iceland_pt_config_reg_type { + ICELAND_CONFIGREG_MMR = 0, + ICELAND_CONFIGREG_SMC_IND, + ICELAND_CONFIGREG_DIDT_IND, + ICELAND_CONFIGREG_CACHE, + ICELAND_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 + +struct iceland_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum iceland_pt_config_reg_type type; +}; + +struct iceland_pt_defaults +{ + uint8_t svi_load_line_en; + uint8_t svi_load_line_vddc; + uint8_t tdc_vddc_throttle_release_limit_perc; + uint8_t tdc_mawt; + uint8_t tdc_waterfall_ctl; + uint8_t dte_ambient_temp_base; + uint32_t display_cac; + uint32_t bamp_temp_gradient; + uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; + uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS]; +}; + +void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int iceland_enable_smc_cac(struct pp_hwmgr *hwmgr); +int iceland_enable_power_containment(struct pp_hwmgr *hwmgr); +int iceland_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* ICELAND_POWERTUNE_H */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c new file mode 100644 index 000000000000..527f37022424 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c @@ -0,0 +1,595 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <asm/div64.h> +#include "iceland_thermal.h" +#include "iceland_hwmgr.h" +#include "iceland_smumgr.h" +#include "atombios.h" +#include "ppsmc.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" + + +/** +* Get Fan Speed Control Parameters. +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pSpeed is the address of the structure where the result is to be placed. +* @exception Always succeeds except if we cannot zero out the output structure. +*/ +int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +/** +* Get Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param pSpeed is the address of the structure where the result is to be placed. +* @exception Fails is the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_STATUS, FDO_PWM_DUTY); + + if (0 == duty100) + return -EINVAL; + + + tmp64 = (uint64_t)duty * 100; + do_div(tmp64, duty100); + *speed = (uint32_t)tmp64; + + if (*speed > 100) + *speed = 100; + + return 0; +} + +/** +* Get Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the address of the structure where the result is to be placed. +* @exception Returns not supported if no fan is found or if pulses per revolution are not set +*/ +int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + return 0; +} + +/** +* Set Fan Speed Control to static mode, so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* mode the fan control mode, 0 default, 1 by percent, 5, by RPM +* @exception Should always succeed. +*/ +int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE); + hwmgr->tmin = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN); + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, mode); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed. 
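*
* For the duty-cycle read-back in iceland_fan_ctrl_get_fan_speed_percent()
* above: FMAX_DUTY100 is the register value corresponding to 100 percent,
* so with hypothetical readings duty = 96 and duty100 = 255 the reported
* speed is 96 * 100 / 255 = 37 percent.  The multiplication is done in 64
* bits and divided via do_div(), and the result is clamped to 100.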
+*/ +static int iceland_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->fan_ctrl_is_in_default_mode) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TMIN, hwmgr->tmin); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +int iceland_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl) == 0) ? 0 : -EINVAL; +} + + +int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl) == 0) ? 0 : -EINVAL; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails is the 100% setting appears to be 0. +*/ +int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -EINVAL; + + if (speed > 100) { + pr_warning("Cannot set more than 100%% duty cycle. Set it to 100.\n"); + speed = 100; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + iceland_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty); + + return iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + result = iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + if (0 == result) + result = iceland_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (min - max) to be set. +* @exception Fails is the speed not lie between min and max. +*/ +int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + return 0; +} + +/** +* Reads the remote temperature from the SIslands thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + + temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_STATUS, CTF_TEMP); + + /* + * Bit 9 means the reading is lower than the lowest usable + * value. + */ + if (0 != (0x200 & temp)) + temp = ICELAND_THERMAL_MAXIMUM_TEMP_READING; + else + temp = (temp & 0x1ff); + + temp = temp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. 
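[Editor's illustration, not part of the patch] The CTF_TEMP decode just above boils down to: bit 9 flags a reading below the lowest usable value (reported as the maximum), the low nine bits carry degrees C, and the result is scaled to driver units. A standalone sketch of the same decode, with PP_TEMPERATURE_UNITS_PER_CENTIGRADES assumed to be 1000.

#include <stdint.h>
#include <stdio.h>

#define MAX_TEMP_READING	255	/* ICELAND_THERMAL_MAXIMUM_TEMP_READING */
#define UNITS_PER_C		1000	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

/* Decode a raw CG_MULT_THERMAL_STATUS.CTF_TEMP field as the driver does. */
static int decode_ctf_temp(uint32_t raw)
{
	int temp;

	if (raw & 0x200)		/* below lowest usable reading */
		temp = MAX_TEMP_READING;
	else
		temp = raw & 0x1ff;	/* nine-bit temperature in degrees C */

	return temp * UNITS_PER_C;	/* scale to driver units */
}

int main(void)
{
	printf("raw 0x05a -> %d\n", decode_ctf_temp(0x05a));	/* 90 C */
	printf("raw 0x200 -> %d\n", decode_ctf_temp(0x200));	/* clamped to max */
	return 0;
}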
+* @param range Temperature range to be programmed for high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, uint32_t low_temp, uint32_t high_temp) +{ + uint32_t low = ICELAND_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = ICELAND_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + if (low < low_temp) + low = low_temp; + if (high > high_temp) + high = high_temp; + + if (low > high) + return -EINVAL; + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL, DIG_THERM_DPM, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + if (0 != hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_TACH_CTRL, EDGE_PER_REV, + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28); + + return 0; +} + +/** +* Enable thermal alerts on the RV770 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert &= ~(ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to enable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable) == 0) ? 0 : -1; +} + +/** +* Disable thermal alerts on the RV770 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + uint32_t alert; + + alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK); + alert |= (ICELAND_THERMAL_HIGH_ALERT_MASK | ICELAND_THERMAL_LOW_ALERT_MASK); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_INT, THERM_INT_MASK, alert); + + /* send message to SMU to disable internal thermal interrupts */ + return (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable) == 0) ? 0 : -1; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = iceland_thermal_disable_alert(hwmgr); + + if (result) + pr_warning("Failed to disable thermal alerts!\n"); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + iceland_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. 
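[Editor's illustration, not part of the patch] iceland_thermal_set_temperature_range() above intersects the requested window with the controller's supported alert range and then programs whole degrees into DIG_THERM_INTL/INTH. A standalone sketch of that clamp; the unit constant is again assumed to be 1000.

#include <stdint.h>
#include <stdio.h>

#define UNITS_PER_C	1000	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */
#define MIN_ALERT_C	0	/* ICELAND_THERMAL_MINIMUM_ALERT_TEMP */
#define MAX_ALERT_C	255	/* ICELAND_THERMAL_MAXIMUM_ALERT_TEMP */

/*
 * Intersect the requested range (in driver units) with the supported
 * alert window and return the whole-degree values to program.
 */
static int clamp_alert_range(uint32_t low_temp, uint32_t high_temp,
			     uint32_t *low_c, uint32_t *high_c)
{
	uint32_t low = MIN_ALERT_C * UNITS_PER_C;
	uint32_t high = MAX_ALERT_C * UNITS_PER_C;

	if (low < low_temp)
		low = low_temp;
	if (high > high_temp)
		high = high_temp;
	if (low > high)
		return -1;		/* the driver returns -EINVAL */

	*low_c = low / UNITS_PER_C;
	*high_c = high / UNITS_PER_C;
	return 0;
}

int main(void)
{
	uint32_t lo, hi;

	/* request 90 C .. 120 C expressed in driver units */
	if (!clamp_alert_range(90000, 120000, &lo, &hi))
		printf("program INTL=%u C, INTH=%u C\n", lo, hi);
	return 0;
}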
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend); + SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; + uint32_t duty100; + uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2; + uint16_t fdo_min, slope1, slope2; + uint32_t reference_clock; + int res; + uint64_t tmp64; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) + return 0; + + if (0 == data->fan_table_start) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); + + if (0 == duty100) { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); + return 0; + } + + tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100; + do_div(tmp64, 10000); + fdo_min = (uint16_t)tmp64; + + t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin; + t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed; + + pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin; + pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed; + + slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100); + fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100); + fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100); + + fan_table.Slope1 = cpu_to_be16(slope1); + fan_table.Slope2 = cpu_to_be16(slope2); + + fan_table.FdoMin = cpu_to_be16(fdo_min); + + fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst); + + fan_table.HystUp = cpu_to_be16(1); + + fan_table.HystSlope = cpu_to_be16(1); + + fan_table.TempRespLim = cpu_to_be16(5); + + reference_clock = iceland_get_xclk(hwmgr); + + fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600); + + fan_table.FdoMax = cpu_to_be16((uint16_t)duty100); + + fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL); + + //fan_table.FanControl_GL_Flag = 1; + + res = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), data->sram_end); +/* TO DO FOR SOME DEVICE ID 0X692b, send this msg return invalid command. 
+ if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanMinPwm, \ + hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit) ? 0 : -1); + + if (res == 0 && hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit != 0) + res = (0 == smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetFanSclkTarget, \ + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit) ? 0 : -1); + + if (0 != res) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); +*/ + return 0; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_iceland_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled PHM_PlatformCaps_MicrocodeFanControl even after this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl)) { + iceland_fan_ctrl_start_smc_fan_control(hwmgr); + iceland_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +static int tf_iceland_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return iceland_thermal_set_temperature_range(hwmgr, range->min, range->max); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +static int tf_iceland_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, + void *output, void *storage, int result) +{ + return iceland_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +static int tf_iceland_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return iceland_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. 
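[Editor's illustration, not part of the patch] Stepping back to tf_iceland_thermal_setup_fan_table() above: the fan-curve breakpoints appear to arrive in hundredths (0.01 C and 0.01 %), so the "+50 then /100" pattern is integer rounding to whole units, and the slopes are scaled by the full-scale duty value. A standalone sketch of the same arithmetic; every number below is an invented example, and the unit interpretation is the editor's reading of the code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100  = 255;	/* FMAX_DUTY100, example value */
	uint32_t t_min    = 4500;	/* 45.00 C */
	uint32_t t_med    = 6500;	/* 65.00 C */
	uint32_t pwm_min  = 3000;	/* 30.00 % */
	uint32_t pwm_med  = 6000;	/* 60.00 % */

	uint32_t t_diff   = t_med - t_min;
	uint32_t pwm_diff = pwm_med - pwm_min;

	/* minimum duty: 0.01% units * duty100 / 10000 */
	uint32_t fdo_min  = (uint32_t)((uint64_t)pwm_min * duty100 / 10000);
	/* slope of the PWM-vs-temperature segment, rounded to nearest */
	uint32_t slope    = (50 + (16 * duty100 * pwm_diff) / t_diff) / 100;
	/* breakpoint temperature in whole degrees, rounded to nearest */
	uint32_t temp_min = (50 + t_min) / 100;

	printf("FdoMin=%u Slope1=%u TempMin=%u C\n", fdo_min, slope, temp_min);
	return 0;
}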
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_iceland_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result) +{ + return iceland_thermal_disable_alert(hwmgr); +} + +static const struct phm_master_table_item iceland_thermal_start_thermal_controller_master_list[] = { + { NULL, tf_iceland_thermal_initialize }, + { NULL, tf_iceland_thermal_set_temperature_range }, + { NULL, tf_iceland_thermal_enable_alert }, + /* + * We should restrict performance levels to low before we halt + * the SMC. On the other hand we are still in boot state when + * we do this so it would be pointless. If this assumption + * changes we have to revisit this table. + */ + { NULL, tf_iceland_thermal_setup_fan_table}, + { NULL, tf_iceland_thermal_start_smc_fan_control}, + { NULL, NULL } +}; + +static const struct phm_master_table_header iceland_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + iceland_thermal_start_thermal_controller_master_list +}; + +static const struct phm_master_table_item iceland_thermal_set_temperature_range_master_list[] = { + { NULL, tf_iceland_thermal_disable_alert}, + { NULL, tf_iceland_thermal_set_temperature_range}, + { NULL, tf_iceland_thermal_enable_alert}, + { NULL, NULL } +}; + +static const struct phm_master_table_header iceland_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + iceland_thermal_set_temperature_range_master_list +}; + +int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + iceland_fan_ctrl_set_default_mode(hwmgr); + return 0; +} + +/** +* Initializes the thermal controller related functions in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. +*/ +int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, &iceland_thermal_set_temperature_range_master, &(hwmgr->set_temperature_range)); + + if (0 == result) { + result = phm_construct_table(hwmgr, + &iceland_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (0 != result) + phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range)); + } + + if (0 == result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h new file mode 100644 index 000000000000..267945f4df71 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef ICELAND_THERMAL_H +#define ICELAND_THERMAL_H + +#include "hwmgr.h" + +#define ICELAND_THERMAL_HIGH_ALERT_MASK 0x1 +#define ICELAND_THERMAL_LOW_ALERT_MASK 0x2 + +#define ICELAND_THERMAL_MINIMUM_TEMP_READING -256 +#define ICELAND_THERMAL_MAXIMUM_TEMP_READING 255 + +#define ICELAND_THERMAL_MINIMUM_ALERT_TEMP 0 +#define ICELAND_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + + +extern int iceland_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int iceland_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int iceland_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); +extern int iceland_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int iceland_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode); +extern int iceland_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int iceland_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_iceland_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int iceland_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr); +extern int iceland_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed); +extern int iceland_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int iceland_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index 769636a0c5b5..b69132296672 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include <asm/div64.h> #include "linux/delay.h" #include "pp_acpi.h" @@ -98,19 +97,6 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) - -static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] = -{ {600, 1050, 3, 0}, {600, 1050, 6, 1} }; - -/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */ -static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] = -{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} }, - { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } }; - -/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */ -static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] = -{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} }; - /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. 
*/ enum DPM_EVENT_SRC { DPM_EVENT_SRC_ANALOG = 0, @@ -2773,9 +2759,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_SclkDeepSleep); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState); if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE) @@ -2820,13 +2803,6 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - if (hwmgr->powercontainment_enabled) - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - else - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); @@ -2905,8 +2881,8 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) continue; } - /* need to make sure vddc is less than 2v or else, it could burn the ASIC. - * real voltage level in unit of 0.01mv */ + /* need to make sure vddc is less than 2V or else, it could burn the ASIC. + * real voltage level in unit of 0.01mV */ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0), "Invalid VDDC value", result = -EINVAL;); @@ -3143,7 +3119,10 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr) table_info->vddc_lookup_table; uint32_t i; - if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) { + if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7 && + ((hwmgr->sub_sys_id == 0xb37 && hwmgr->sub_vendor_id == 0x1002) || + (hwmgr->sub_sys_id == 0x4a8 && hwmgr->sub_vendor_id == 0x1043) || + (hwmgr->sub_sys_id == 0x9480 && hwmgr->sub_vendor_id == 0x1682))) { if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 26f3e30d0fef..1126bd4f74dc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include "ppatomctrl.h" #include "atombios.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index c7dc111221c2..7f9ba7f15e19 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include "linux/delay.h" #include "pp_acpi.h" #include "hwmgr.h" @@ -302,6 +301,8 @@ void tonga_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition); + tonga_initialize_power_tune_defaults(hwmgr); + data->mclk_strobe_mode_threshold = 40000; data->mclk_stutter_mode_threshold = 30000; data->mclk_edc_enable_threshold = 40000; @@ -2479,7 +2480,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t graphic_level->VoltageDownHyst = 0; graphic_level->PowerThrottle = 0; - threshold = engine_clock * data->fast_watemark_threshold / 100; + threshold = engine_clock * data->fast_watermark_threshold / 100; /* *get the DAL clock. do it in funture. 
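[Editor's aside on the Polaris10 hunk earlier in this chunk, not part of the patch] The voltage workaround is now gated on three specific (subsystem id, subsystem vendor id) pairs in addition to the chip id and revision 0xC7 checks. A standalone sketch of that allowlist check; the board_id struct and helper names are stand-ins, and only the three id pairs are copied from the hunk.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the identifiers the hwmgr carries. */
struct board_id {
	uint16_t sub_sys_id;
	uint16_t sub_vendor_id;
};

/* (subsystem id, subsystem vendor id) pairs from the hunk above. */
static const struct board_id workaround_boards[] = {
	{ 0x0b37, 0x1002 },
	{ 0x04a8, 0x1043 },
	{ 0x9480, 0x1682 },
};

static bool needs_voltage_workaround(const struct board_id *id)
{
	for (unsigned int i = 0;
	     i < sizeof(workaround_boards) / sizeof(workaround_boards[0]); i++)
		if (id->sub_sys_id == workaround_boards[i].sub_sys_id &&
		    id->sub_vendor_id == workaround_boards[i].sub_vendor_id)
			return true;
	return false;
}

int main(void)
{
	struct board_id b = { 0x04a8, 0x1043 };

	printf("workaround %s\n",
	       needs_voltage_workaround(&b) ? "applies" : "does not apply");
	return 0;
}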
PECI_GetMinClockSettings(hwmgr->peci, &minClocks); @@ -2982,6 +2983,10 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot Level!", return result;); + result = tonga_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(result == 0, + "Failed to populate BAPM Parameters!", return result); + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { result = tonga_populate_clock_stretcher_data_table(hwmgr); @@ -4370,6 +4375,10 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to initialize ARB table index!", result = tmp_result); + tmp_result = tonga_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to populate PM fuses!", result = tmp_result); + tmp_result = tonga_populate_initial_mc_reg_table(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to populate initialize MC Reg table!", result = tmp_result); @@ -4388,6 +4397,18 @@ int tonga_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to start DPM!", result = tmp_result); + tmp_result = tonga_enable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable SMC CAC!", result = tmp_result); + + tmp_result = tonga_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable power containment!", result = tmp_result); + + tmp_result = tonga_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to power control set level!", result = tmp_result); + return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h index 3961884bfa9b..fcad9426d3c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -300,6 +300,7 @@ struct tonga_hwmgr { bool dll_defaule_on; bool performance_request_registered; + /* ----------------- Low Power Features ---------------------*/ phw_tonga_bacos bacos; phw_tonga_ulv_parm ulv; @@ -314,10 +315,14 @@ struct tonga_hwmgr { bool enable_tdc_limit_feature; bool enable_pkg_pwr_tracking_feature; bool disable_uvd_power_tune_feature; - phw_tonga_pt_defaults *power_tune_defaults; + struct tonga_pt_defaults *power_tune_defaults; SMU72_Discrete_PmFuses power_tune_table; - uint32_t ul_dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ - uint32_t fast_watemark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + uint32_t dte_tj_offset; /* Fudge factor in DPM table to correct HW DTE errors */ + uint32_t fast_watermark_threshold; /* use fast watermark if clock is equal or above this. In percentage of the target high sclk. */ + + + bool enable_dte_feature; + /* ----------------- Phase Shedding ---------------------*/ bool vddc_phase_shed_control; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c new file mode 100644 index 000000000000..9496ade3247e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c @@ -0,0 +1,498 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "hwmgr.h" +#include "smumgr.h" +#include "tonga_hwmgr.h" +#include "tonga_powertune.h" +#include "tonga_smumgr.h" +#include "smu72_discrete.h" +#include "pp_debug.h" +#include "tonga_ppsmc.h" + +#define VOLTAGE_SCALE 4 +#define POWERTUNE_DEFAULT_SET_MAX 1 + +struct tonga_pt_defaults tonga_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { +/* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + {1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + {0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + {0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *tonga_hwmgr = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t tmp = 0; + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + tonga_hwmgr->power_tune_defaults = + &tonga_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + tonga_hwmgr->power_tune_defaults = &tonga_power_tune_data_set_array[0]; + + /* Assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + tonga_hwmgr->dte_tj_offset = tmp; + + if (!tmp) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + tonga_hwmgr->fast_watermark_threshold = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + tmp = 1; + tonga_hwmgr->enable_dte_feature = tmp ? false : true; + tonga_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + tonga_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? 
true : false; + } + } +} + + +int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_pt_defaults *defaults = data->power_tune_defaults; + SMU72_Discrete_DpmTable *dpm_table = &(data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + int i, j, k; + uint16_t *pdef1; + uint16_t *pdef2; + + + /* TDP number of fraction bits are changed from 8 to 7 for Fiji + * as requested by SMC team + */ + dpm_table->DefaultTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usTDP * 256)); + dpm_table->TargetTdp = PP_HOST_TO_SMC_US( + (uint16_t)(cac_dtp_table->usConfigurableTDP * 256)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp); + dpm_table->GpuTjHyst = 8; + + dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base; + + dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient); + pdef1 = defaults->bapmti_r; + pdef2 = defaults->bapmti_rc; + + for (i = 0; i < SMU72_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU72_DTE_SOURCES; j++) { + for (k = 0; k < SMU72_DTE_SINKS; k++) { + dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + + data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en; + data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddC; + data->power_tune_table.SviLoadLineTrimVddC = 3; + data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + + /* TDC number of fraction bits are changed from 8 to 7 + * for Fiji as requested by SMC team + */ + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 256); + data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->tdc_vddc_throttle_release_limit_perc; + data->power_tune_table.TDC_MAWt = defaults->tdc_mawt; + + return 0; +} + +static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + const struct tonga_pt_defaults *defaults = data->power_tune_defaults; + uint32_t temp; + + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + fuse_table_offset + + offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl), + (uint32_t *)&temp, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", + return -EINVAL); + else + data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl; + + return 0; +} + +static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_hwmgr *data = (struct tonga_hwmgr 
*)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.LPMLTemperatureScaler[i] = 0; + + return 0; +} + +static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if ((hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity & (1 << 15)) || + (hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)) + hwmgr->thermal_controller.advanceFanControlParameters. + usFanOutputSensitivity = hwmgr->thermal_controller. + advanceFanControlParameters.usDefaultFanOutputSensitivity; + + data->power_tune_table.FuzzyFan_PwmSetDelta = + PP_HOST_TO_SMC_US(hwmgr->thermal_controller. + advanceFanControlParameters.usFanOutputSensitivity); + return 0; +} + +static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr) +{ + int i; + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + /* Currently not used. Set all to zero. */ + for (i = 0; i < 16; i++) + data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) +{ + return 0; +} + +static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (tonga_read_smc_sram_dword(hwmgr->smumgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + /* DW6 */ + if (tonga_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + /* DW7 */ + if (tonga_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + /* DW8 */ + if (tonga_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl Failed !", + return -EINVAL); + + /* DW9-DW12 */ + if (tonga_populate_temperature_scaler(hwmgr) != 0) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + /* DW13-DW14 */ + if (tonga_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + /* DW15-DW18 */ + if (tonga_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + /* DW19 */ + if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr)) + 
PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Min and Max Vid Failed!", + return -EINVAL); + + /* DW20 */ + if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!", + return -EINVAL); + + if (tonga_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, + (uint8_t *)&data->power_tune_table, + sizeof(struct SMU72_Discrete_PmFuses), data->sram_end)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC)) { + int smc_result; + + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable CAC in SMC.", result = -1); + + data->cac_enabled = (smc_result == 0) ? true : false; + } + return result; +} + +int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + +int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_PkgPwrSetLimit, n); + return 0; +} + +static int tonga_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp) +{ + return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr, + PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); +} + +int tonga_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int smc_result; + int result = 0; + + data->power_containment_features = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->enable_dte_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_EnableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable DTE in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE; + } + + if (data->enable_tdc_limit_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable TDCLimit in SMC.", result = -1;); + if (smc_result == 0) + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_TDCLimit; + } + + if (data->enable_pkg_pwr_tracking_feature) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to enable PkgPwrTracking in SMC.", result = -1;); + if (smc_result == 0) { + struct phm_cac_tdp_table *cac_table = + table_info->cac_dtp_table; + uint32_t default_limit = + 
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256); + + data->power_containment_features |= + POWERCONTAINMENT_FEATURE_PkgPwrLimit; + + if (tonga_set_power_limit(hwmgr, default_limit)) + printk(KERN_ERR "Failed to set Default Power Limit in SMC!"); + } + } + } + return result; +} + +int tonga_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + +int tonga_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + int adjust_percent, target_tdp; + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + /* adjustment percentage has already been validated */ + adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ? 
+ hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + /* SMC requested that target_tdp to be 7 bit fraction in DPM table + * but message to be 8 bit fraction for messages + */ + target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; + result = tonga_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp); + } + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h index 8e6670b3cb67..c8bdb92d81f4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h @@ -35,20 +35,23 @@ enum _phw_tonga_ptc_config_reg_type { typedef enum _phw_tonga_ptc_config_reg_type phw_tonga_ptc_config_reg_type; /* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 + + +/* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 -struct _phw_tonga_pt_config_reg { +struct tonga_pt_config_reg { uint32_t Offset; uint32_t Mask; uint32_t Shift; uint32_t Value; phw_tonga_ptc_config_reg_type Type; }; -typedef struct _phw_tonga_pt_config_reg phw_tonga_pt_config_reg; -struct _phw_tonga_pt_defaults { +struct tonga_pt_defaults { uint8_t svi_load_line_en; uint8_t svi_load_line_vddC; uint8_t tdc_vddc_throttle_release_limit_perc; @@ -60,7 +63,18 @@ struct _phw_tonga_pt_defaults { uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS]; }; -typedef struct _phw_tonga_pt_defaults phw_tonga_pt_defaults; + + + +void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int tonga_enable_smc_cac(struct pp_hwmgr *hwmgr); +int tonga_disable_smc_cac(struct pp_hwmgr *hwmgr); +int tonga_enable_power_containment(struct pp_hwmgr *hwmgr); +int tonga_disable_power_containment(struct pp_hwmgr *hwmgr); +int tonga_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int tonga_power_control_set_level(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index cfb647f76cbe..a0ffd4a73d8c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -22,7 +22,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include "tonga_processpptables.h" #include "ppatomctrl.h" diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index b764c8c05ec8..3f8172f545b0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,8 +132,10 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; - bool powercontainment_enabled; + uint16_t sub_sys_id; + uint16_t sub_vendor_id; }; + enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, AMD_PP_DisplayConfigType_DP54 , diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index bf0d2accf7bf..36b4ec9c9cb1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -41,6 +41,9 @@ struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; +extern int amdgpu_powercontainment; +extern int amdgpu_sclk_deep_sleep_en; + enum DISPLAY_GAP { DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. */ @@ -614,7 +617,6 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; - bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; @@ -637,16 +639,7 @@ extern int hw_init_power_state_table(struct pp_hwmgr *hwmgr); extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); -extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, - uint32_t index, uint32_t value, uint32_t mask); - -extern uint32_t phm_read_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, uint32_t index); -extern void phm_write_indirect_register(struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value); extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t indirect_port, @@ -654,12 +647,7 @@ extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, uint32_t value, uint32_t mask); -extern void phm_wait_for_indirect_register_unequal( - struct pp_hwmgr *hwmgr, - uint32_t indirect_port, - uint32_t index, - uint32_t value, - uint32_t mask); + extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr); extern bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr); @@ -697,43 +685,7 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_FIELD_SHIFT(reg, field)) -#define PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_on_register(hwmgr, index, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, index, value, mask) \ - phm_wait_for_register_unequal(hwmgr, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX, index, value, mask) -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX_0, index, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \ - phm_wait_for_indirect_register_unequal(hwmgr, mm##port##_INDEX_0, index, value, mask) - -/* Operations on named registers. 
*/ - -#define PHM_WAIT_REGISTER(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) - -#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) /* Operations on named fields. */ @@ -762,60 +714,16 @@ extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); PHM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ reg, field, fieldval)) -#define PHM_WAIT_FIELD(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, index, value, mask) \ + phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask) -#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) +#define PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \ + PHM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask) -#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \ - PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ - << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \ - PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, (fieldval) \ +#define PHM_WAIT_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \ + PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \ << PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field)) -/* Operations on arrays of registers & fields. 
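[Editor's illustration, not part of the patch] The PHM_* helpers kept in this header rely on token pasting: the register name is glued onto an mm or ix prefix so the matching index constant is selected at compile time. A simplified standalone sketch of that pattern; the two index constants and fake_read() are invented for the example.

#include <stdint.h>
#include <stdio.h>

/* Invented index constants standing in for the generated mm*/ /* ix* defines. */
#define mmCG_THERMAL_STATUS	0x01c0
#define ixCG_FDO_CTRL1		0x0c04

static uint32_t fake_read(uint32_t index)
{
	return index ^ 0xdeadbeefu;	/* placeholder for a real register read */
}

/* reg is pasted onto the prefix, selecting the matching constant. */
#define READ_REGISTER(reg)		fake_read(mm##reg)
#define READ_INDIRECT_REGISTER(reg)	fake_read(ix##reg)

int main(void)
{
	printf("0x%08x 0x%08x\n",
	       READ_REGISTER(CG_THERMAL_STATUS),
	       READ_INDIRECT_REGISTER(CG_FDO_CTRL1));
	return 0;
}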
*/ - -#define PHM_READ_ARRAY_REGISTER(device, reg, offset) \ - cgs_read_register(device, mm##reg + (offset)) - -#define PHM_WRITE_ARRAY_REGISTER(device, reg, offset, value) \ - cgs_write_register(device, mm##reg + (offset), value) - -#define PHM_WAIT_ARRAY_REGISTER(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_WAIT_ARRAY_REGISTER_UNEQUAL(hwmgr, reg, offset, value, mask) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), value, mask) - -#define PHM_READ_ARRAY_FIELD(hwmgr, reg, offset, field) \ - PHM_GET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), reg, field) - -#define PHM_WRITE_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WRITE_ARRAY_REGISTER(hwmgr->device, reg, offset, \ - PHM_SET_FIELD(PHM_READ_ARRAY_REGISTER(hwmgr->device, reg, offset), \ - reg, field, fieldvalue)) - -#define PHM_WAIT_ARRAY_FIELD(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) - -#define PHM_WAIT_ARRAY_FIELD_UNEQUAL(hwmgr, reg, offset, field, fieldvalue) \ - PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, mm##reg + (offset), \ - (fieldvalue) << PHM_FIELD_SHIFT(reg, field), \ - PHM_FIELD_MASK(reg, field)) #endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71.h b/drivers/gpu/drm/amd/powerplay/inc/smu71.h new file mode 100644 index 000000000000..71c9b2d28640 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71.h @@ -0,0 +1,510 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef SMU71_H +#define SMU71_H + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define SMU__NUM_PCIE_DPM_LEVELS 8 +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__VARIANT__ICELAND 1 +#define SMU__DGPU_ONLY 1 +#define SMU__DYNAMIC_MCARB_SETTINGS 1 + +enum SID_OPTION { + SID_OPTION_HI, + SID_OPTION_LO, + SID_OPTION_COUNT +}; + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU71_MAX_LEVELS_VDDC 8 +#define SMU71_MAX_LEVELS_VDDCI 4 +#define SMU71_MAX_LEVELS_MVDD 4 +#define SMU71_MAX_LEVELS_VDDNB 8 + +#define SMU71_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU71_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU71_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU71_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT) +#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3 +#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT) +#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6 +#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT) +#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9 +#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT) +#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12 +#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT) +#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15 +#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT) +#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18 +#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT) +#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21 +#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT) +#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24 +#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT) +#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27 +#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT) + + +#if defined SMU__DGPU_ONLY +#define SMU71_DTE_ITERATIONS 5 +#define SMU71_DTE_SOURCES 3 +#define SMU71_DTE_SINKS 1 +#define SMU71_NUM_CPU_TES 0 +#define SMU71_NUM_GPU_TES 1 +#define SMU71_NUM_NON_TES 2 + +#endif + +#if defined SMU__FUSION_ONLY +#define SMU7_DTE_ITERATIONS 5 +#define SMU7_DTE_SOURCES 5 +#define SMU7_DTE_SINKS 3 +#define SMU7_NUM_CPU_TES 2 +#define SMU7_NUM_GPU_TES 1 +#define SMU7_NUM_NON_TES 2 + +#endif + +struct SMU71_PIDController +{ + uint32_t Ki; + int32_t LFWindupUpperLim; + int32_t LFWindupLowerLim; + uint32_t StatePrecision; + uint32_t LfPrecision; + uint32_t LfOffset; + uint32_t MaxState; + uint32_t MaxLfFraction; + uint32_t StateShift; +}; + +typedef struct SMU71_PIDController SMU71_PIDController; + +struct SMU7_LocalDpmScoreboard +{ + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t 
VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfSclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t GfxClkSlow; + uint8_t GpioClampMode; + + uint8_t FpsFilterWeight; + uint8_t EnabledLevelsChange; + uint8_t DteClampMode; + uint8_t FpsClampMode; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_GRAPHICS]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_GRAPHICS]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint8_t FpsEnabled; + uint8_t MaxPerfLevel; + uint8_t AllowLowClkInterruptToHost; + uint8_t FpsRunning; + + uint32_t MaxAllowedFrequency; +}; + +typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard; + +#define SMU7_MAX_VOLTAGE_CLIENTS 12 + +struct SMU7_VoltageScoreboard +{ + uint16_t CurrentVoltage; + uint16_t HighestVoltage; + uint16_t MaxVid; + uint8_t HighestVidOffset; + uint8_t CurrentVidOffset; +#if defined (SMU__DGPU_ONLY) + uint8_t CurrentPhases; + uint8_t HighestPhases; +#else + uint8_t AvsOffset; + uint8_t AvsOffsetApplied; +#endif + uint8_t ControllerBusy; + uint8_t CurrentVid; + uint16_t RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS]; +#if defined (SMU__DGPU_ONLY) + uint8_t RequestedPhases[SMU7_MAX_VOLTAGE_CLIENTS]; +#endif + uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS]; + uint8_t TargetIndex; + uint8_t Delay; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint16_t CurrentStdVoltageHiSidd; + uint16_t CurrentStdVoltageLoSidd; +#if defined (SMU__DGPU_ONLY) + uint16_t RequestedVddci; + uint16_t CurrentVddci; + uint16_t HighestVddci; + uint8_t CurrentVddciVid; + uint8_t TargetVddciIndex; +#endif +}; + +typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard; + +// ------------------------------------------------------------------------------------------------------------------------- +#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */ + +struct SMU7_PCIeLinkSpeedScoreboard +{ + uint8_t DpmEnable; + uint8_t DpmRunning; + uint8_t DpmForce; + uint8_t DpmForceLevel; + + uint8_t CurrentLinkSpeed; + uint8_t EnabledLevelsChange; + uint16_t AutoDpmInterval; + + uint16_t AutoDpmRange; + uint16_t AutoDpmCount; + + uint8_t DpmMode; + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t CurrentLinkLevel; + +}; + +typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard; + +// -------------------------------------------------------- CAC table ------------------------------------------------------ +#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16 + +#define SMU7_SCALE_I 7 +#define SMU7_SCALE_R 12 + +struct SMU7_PowerScoreboard +{ + uint16_t MinVoltage; + uint16_t MaxVoltage; + + uint32_t AvgGpuPower; + + uint16_t VddcLeakagePower[SID_OPTION_COUNT]; + uint16_t VddcSclkConstantPower[SID_OPTION_COUNT]; + uint16_t VddcSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcNonSclkDynamicPower[SID_OPTION_COUNT]; + uint16_t VddcTotalPower[SID_OPTION_COUNT]; + uint16_t VddcTotalCurrent[SID_OPTION_COUNT]; + uint16_t VddcLoadVoltage[SID_OPTION_COUNT]; + uint16_t VddcNoLoadVoltage[SID_OPTION_COUNT]; + + uint16_t DisplayPhyPower; + uint16_t PciePhyPower; + + uint16_t VddciTotalPower; + uint16_t Vddr1TotalPower; + + uint32_t RocPower; + + 
uint32_t last_power; + uint32_t enableWinAvg; + + uint32_t lkg_acc; + uint16_t VoltLkgeScaler; + uint16_t TempLkgeScaler; + + uint32_t uvd_cac_dclk; + uint32_t uvd_cac_vclk; + uint32_t vce_cac_eclk; + uint32_t samu_cac_samclk; + uint32_t display_cac_dispclk; + uint32_t acp_cac_aclk; + uint32_t unb_cac; + + uint32_t WinTime; + + uint16_t GpuPwr_MAWt; + uint16_t FilteredVddcTotalPower; + + uint8_t CalculationRepeats; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; +}; + +typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard; + +// -------------------------------------------------------------------------------------------------- + +struct SMU7_ThermalScoreboard +{ + int16_t GpuLimit; + int16_t GpuHyst; + uint16_t CurrGnbTemp; + uint16_t FilteredGnbTemp; + uint8_t ControllerEnable; + uint8_t ControllerRunning; + uint8_t WaterfallUp; + uint8_t WaterfallDown; + uint8_t WaterfallLimit; + uint8_t padding[3]; +}; + +typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard; + +// For FeatureEnables: +#define SMU7_SCLK_DPM_CONFIG_MASK 0x01 +#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02 +#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04 +#define SMU7_MCLK_DPM_CONFIG_MASK 0x08 +#define SMU7_UVD_DPM_CONFIG_MASK 0x10 +#define SMU7_VCE_DPM_CONFIG_MASK 0x20 +#define SMU7_ACP_DPM_CONFIG_MASK 0x40 +#define SMU7_SAMU_DPM_CONFIG_MASK 0x80 +#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100 + +#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001 +#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002 +#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100 +#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200 +#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000 +#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000 + +// All 'soft registers' should be uint32_t. 
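As an illustration of how the FeatureEnables masks above and the soft-register block defined just below are typically consumed, here is a minimal sketch, not part of this patch: it assumes the iceland_read_smc_sram_dword() helper and ICELAND_SMC_SIZE limit introduced later in this series, plus a hypothetical soft_regs_start offset taken from the SoftRegisters field of SMU71_Firmware_Header.

/* Illustrative sketch only, not part of the patch; offsetof() comes from <linux/stddef.h>. */
static bool example_mclk_dpm_enabled(struct pp_smumgr *smumgr,
				     uint32_t soft_regs_start)
{
	uint32_t feature_enables = 0;

	/* FeatureEnables is a dword at a fixed offset in the packed
	 * SMU71_SoftRegisters block kept in SMC SRAM. */
	if (iceland_read_smc_sram_dword(smumgr,
			soft_regs_start +
			offsetof(SMU71_SoftRegisters, FeatureEnables),
			&feature_enables, ICELAND_SMC_SIZE))
		return false;

	return (feature_enables & SMU7_MCLK_DPM_CONFIG_MASK) != 0;
}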
+struct SMU71_SoftRegisters +{ + uint32_t RefClockFrequency; + uint32_t PmTimerPeriod; + uint32_t FeatureEnables; +#if defined (SMU__DGPU_ONLY) + uint32_t PreVBlankGap; + uint32_t VBlankTimeout; + uint32_t TrainTimeGap; + uint32_t MvddSwitchTime; + uint32_t LongestAcpiTrainTime; + uint32_t AcpiDelay; + uint32_t G5TrainTime; + uint32_t DelayMpllPwron; + uint32_t VoltageChangeTimeout; +#endif + uint32_t HandshakeDisables; + + uint8_t DisplayPhy1Config; + uint8_t DisplayPhy2Config; + uint8_t DisplayPhy3Config; + uint8_t DisplayPhy4Config; + + uint8_t DisplayPhy5Config; + uint8_t DisplayPhy6Config; + uint8_t DisplayPhy7Config; + uint8_t DisplayPhy8Config; + + uint32_t AverageGraphicsActivity; + uint32_t AverageMemoryActivity; + uint32_t AverageGioActivity; + + uint8_t SClkDpmEnabledLevels; + uint8_t MClkDpmEnabledLevels; + uint8_t LClkDpmEnabledLevels; + uint8_t PCIeDpmEnabledLevels; + + uint32_t DRAM_LOG_ADDR_H; + uint32_t DRAM_LOG_ADDR_L; + uint32_t DRAM_LOG_PHY_ADDR_H; + uint32_t DRAM_LOG_PHY_ADDR_L; + uint32_t DRAM_LOG_BUFF_SIZE; + uint32_t UlvEnterCount; + uint32_t UlvTime; + uint32_t UcodeLoadStatus; + uint8_t DPMFreezeAndForced; + uint8_t Activity_Weight; + uint8_t Reserved8[2]; + uint32_t Reserved; +}; + +typedef struct SMU71_SoftRegisters SMU71_SoftRegisters; + +struct SMU71_Firmware_Header +{ + uint32_t Digest[5]; + uint32_t Version; + uint32_t HeaderSize; + uint32_t Flags; + uint32_t EntryPoint; + uint32_t CodeSize; + uint32_t ImageSize; + + uint32_t Rtos; + uint32_t SoftRegisters; + uint32_t DpmTable; + uint32_t FanTable; + uint32_t CacConfigTable; + uint32_t CacStatusTable; + + uint32_t mcRegisterTable; + + uint32_t mcArbDramTimingTable; + + uint32_t PmFuseTable; + uint32_t Globals; + uint32_t UvdDpmTable; + uint32_t AcpDpmTable; + uint32_t VceDpmTable; + uint32_t SamuDpmTable; + uint32_t UlvSettings; + uint32_t Reserved[37]; + uint32_t Signature; +}; + +typedef struct SMU71_Firmware_Header SMU71_Firmware_Header; + +struct SMU7_HystController_Data +{ + uint8_t waterfall_up; + uint8_t waterfall_down; + uint8_t pstate; + uint8_t clamp_mode; +}; + +typedef struct SMU7_HystController_Data SMU7_HystController_Data; + +#define SMU71_FIRMWARE_HEADER_LOCATION 0x20000 + +enum DisplayConfig { + PowerDown = 1, + DP54x4, + DP54x2, + DP54x1, + DP27x4, + DP27x2, + DP27x1, + HDMI297, + HDMI162, + LVDS, + DP324x4, + DP324x2, + DP324x1 +}; + +//#define SX_BLOCK_COUNT 8 +//#define MC_BLOCK_COUNT 1 +//#define CPL_BLOCK_COUNT 27 + +#if defined SMU__VARIANT__ICELAND + #define SX_BLOCK_COUNT 8 + #define MC_BLOCK_COUNT 1 + #define CPL_BLOCK_COUNT 29 +#endif + +struct SMU7_Local_Cac { + uint8_t BlockId; + uint8_t SignalId; + uint8_t Threshold; + uint8_t Padding; +}; + +typedef struct SMU7_Local_Cac SMU7_Local_Cac; + +struct SMU7_Local_Cac_Table { + SMU7_Local_Cac SxLocalCac[SX_BLOCK_COUNT]; + SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT]; + SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT]; +}; + +typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h new file mode 100644 index 000000000000..c0e3936d5c2e --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h @@ -0,0 +1,631 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef SMU71_DISCRETE_H +#define SMU71_DISCRETE_H + +#include "smu71.h" + +#if !defined(SMC_MICROCODE) +#pragma pack(push, 1) +#endif + +#define VDDC_ON_SVI2 0x1 +#define VDDCI_ON_SVI2 0x2 +#define MVDD_ON_SVI2 0x4 + +struct SMU71_Discrete_VoltageLevel +{ + uint16_t Voltage; + uint16_t StdVoltageHiSidd; + uint16_t StdVoltageLoSidd; + uint8_t Smio; + uint8_t padding; +}; + +typedef struct SMU71_Discrete_VoltageLevel SMU71_Discrete_VoltageLevel; + +struct SMU71_Discrete_GraphicsLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + + uint32_t SclkFrequency; + + uint8_t pcieDpmLevel; + uint8_t DeepSleepDivId; + uint16_t ActivityLevel; + + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t EnabledForActivity; + uint8_t EnabledForThrottle; + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t PowerThrottle; +}; + +typedef struct SMU71_Discrete_GraphicsLevel SMU71_Discrete_GraphicsLevel; + +struct SMU71_Discrete_ACPILevel +{ + uint32_t Flags; + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t SclkFrequency; + uint8_t SclkDid; + uint8_t DisplayWatermark; + uint8_t DeepSleepDivId; + uint8_t padding; + uint32_t CgSpllFuncCntl; + uint32_t CgSpllFuncCntl2; + uint32_t CgSpllFuncCntl3; + uint32_t CgSpllFuncCntl4; + uint32_t SpllSpreadSpectrum; + uint32_t SpllSpreadSpectrum2; + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; +}; + +typedef struct SMU71_Discrete_ACPILevel SMU71_Discrete_ACPILevel; + +struct SMU71_Discrete_Ulv +{ + uint32_t CcPwrDynRm; + uint32_t CcPwrDynRm1; + uint16_t VddcOffset; + uint8_t VddcOffsetVid; + uint8_t VddcPhase; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_Ulv SMU71_Discrete_Ulv; + +struct SMU71_Discrete_MemoryLevel +{ + uint32_t MinVddc; + uint32_t MinVddcPhases; + uint32_t MinVddci; + uint32_t MinMvdd; + + uint32_t MclkFrequency; + + uint8_t EdcReadEnable; + uint8_t EdcWriteEnable; + uint8_t RttEnable; + uint8_t StutterEnable; + + uint8_t StrobeEnable; + uint8_t StrobeRatio; + uint8_t EnabledForThrottle; + uint8_t EnabledForActivity; + + uint8_t UpHyst; + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t padding; + + uint16_t ActivityLevel; + uint8_t DisplayWatermark; + uint8_t padding1; + + uint32_t MpllFuncCntl; + uint32_t MpllFuncCntl_1; + uint32_t MpllFuncCntl_2; + 
uint32_t MpllAdFuncCntl; + uint32_t MpllDqFuncCntl; + uint32_t MclkPwrmgtCntl; + uint32_t DllCntl; + uint32_t MpllSs1; + uint32_t MpllSs2; +}; + +typedef struct SMU71_Discrete_MemoryLevel SMU71_Discrete_MemoryLevel; + +struct SMU71_Discrete_LinkLevel +{ + uint8_t PcieGenSpeed; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 + uint8_t PcieLaneCount; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint8_t EnabledForActivity; + uint8_t SPC; + uint32_t DownThreshold; + uint32_t UpThreshold; + uint32_t Reserved; +}; + +typedef struct SMU71_Discrete_LinkLevel SMU71_Discrete_LinkLevel; + + +#ifdef SMU__DYNAMIC_MCARB_SETTINGS +// MC ARB DRAM Timing registers. +struct SMU71_Discrete_MCArbDramTimingTableEntry +{ + uint32_t McArbDramTiming; + uint32_t McArbDramTiming2; + uint8_t McArbBurstTime; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTableEntry SMU71_Discrete_MCArbDramTimingTableEntry; + +struct SMU71_Discrete_MCArbDramTimingTable +{ + SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; +}; + +typedef struct SMU71_Discrete_MCArbDramTimingTable SMU71_Discrete_MCArbDramTimingTable; +#endif + +// UVD VCLK/DCLK state (level) definition. +struct SMU71_Discrete_UvdLevel +{ + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint16_t MinVddc; + uint8_t MinVddcPhases; + uint8_t VclkDivider; + uint8_t DclkDivider; + uint8_t padding[3]; +}; + +typedef struct SMU71_Discrete_UvdLevel SMU71_Discrete_UvdLevel; + +// Clocks for other external blocks (VCE, ACP, SAMU). +struct SMU71_Discrete_ExtClkLevel +{ + uint32_t Frequency; + uint16_t MinVoltage; + uint8_t MinPhases; + uint8_t Divider; +}; + +typedef struct SMU71_Discrete_ExtClkLevel SMU71_Discrete_ExtClkLevel; + +// Everything that we need to keep track of about the current state. +// Use this instead of copies of the GraphicsLevel and MemoryLevel structures to keep track of state parameters +// that need to be checked later. +// We don't need to cache everything about a state, just a few parameters. 
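To make the caching idea in the comment above concrete, here is a minimal sketch, not part of this patch, of copying the few parameters that later checks consult from freshly built graphics/memory levels into the SMU71_Discrete_StateInfo defined next; the helper name and calling context are hypothetical.

/* Illustrative sketch only, not part of the patch. */
static void example_cache_state_info(SMU71_Discrete_StateInfo *info,
				     const SMU71_Discrete_GraphicsLevel *gfx,
				     const SMU71_Discrete_MemoryLevel *mem)
{
	/* Keep only the handful of fields that later checks look at. */
	info->SclkFrequency = gfx->SclkFrequency;
	info->SclkDid = gfx->SclkDid;
	info->DisplayWatermark = gfx->DisplayWatermark;
	info->MclkFrequency = mem->MclkFrequency;
}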
+struct SMU71_Discrete_StateInfo +{ + uint32_t SclkFrequency; + uint32_t MclkFrequency; + uint32_t VclkFrequency; + uint32_t DclkFrequency; + uint32_t SamclkFrequency; + uint32_t AclkFrequency; + uint32_t EclkFrequency; + uint16_t MvddVoltage; + uint16_t padding16; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + uint8_t McRegIndex; + uint8_t SeqIndex; + uint8_t SclkDid; + int8_t SclkIndex; + int8_t MclkIndex; + uint8_t PCIeGen; + +}; + +typedef struct SMU71_Discrete_StateInfo SMU71_Discrete_StateInfo; + + +struct SMU71_Discrete_DpmTable +{ + // Multi-DPM controller settings + SMU71_PIDController GraphicsPIDController; + SMU71_PIDController MemoryPIDController; + SMU71_PIDController LinkPIDController; + + uint32_t SystemFlags; + + // SMIO masks for voltage and phase controls + uint32_t SmioMaskVddcVid; + uint32_t SmioMaskVddcPhase; + uint32_t SmioMaskVddciVid; + uint32_t SmioMaskMvddVid; + + uint32_t VddcLevelCount; + uint32_t VddciLevelCount; + uint32_t MvddLevelCount; + + SMU71_Discrete_VoltageLevel VddcLevel [SMU71_MAX_LEVELS_VDDC]; + SMU71_Discrete_VoltageLevel VddciLevel [SMU71_MAX_LEVELS_VDDCI]; + SMU71_Discrete_VoltageLevel MvddLevel [SMU71_MAX_LEVELS_MVDD]; + + uint8_t GraphicsDpmLevelCount; + uint8_t MemoryDpmLevelCount; + uint8_t LinkLevelCount; + uint8_t MasterDeepSleepControl; + + uint32_t Reserved[5]; + + // State table entries for each DPM state + SMU71_Discrete_GraphicsLevel GraphicsLevel [SMU71_MAX_LEVELS_GRAPHICS]; + SMU71_Discrete_MemoryLevel MemoryACPILevel; + SMU71_Discrete_MemoryLevel MemoryLevel [SMU71_MAX_LEVELS_MEMORY]; + SMU71_Discrete_LinkLevel LinkLevel [SMU71_MAX_LEVELS_LINK]; + SMU71_Discrete_ACPILevel ACPILevel; + + uint32_t SclkStepSize; + uint32_t Smio [SMU71_MAX_ENTRIES_SMIO]; + + uint8_t GraphicsBootLevel; + uint8_t GraphicsVoltageChangeEnable; + uint8_t GraphicsThermThrottleEnable; + uint8_t GraphicsInterval; + + uint8_t VoltageInterval; + uint8_t ThermalInterval; + uint16_t TemperatureLimitHigh; + + uint16_t TemperatureLimitLow; + uint8_t MemoryBootLevel; + uint8_t MemoryVoltageChangeEnable; + + uint8_t MemoryInterval; + uint8_t MemoryThermThrottleEnable; + uint8_t MergedVddci; + uint8_t padding2; + + uint16_t VoltageResponseTime; + uint16_t PhaseResponseTime; + + uint8_t PCIeBootLinkLevel; + uint8_t PCIeGenInterval; + uint8_t DTEInterval; + uint8_t DTEMode; + + uint8_t SVI2Enable; + uint8_t VRHotGpio; + uint8_t AcDcGpio; + uint8_t ThermGpio; + + uint32_t DisplayCac; + + uint16_t MaxPwr; + uint16_t NomPwr; + + uint16_t FpsHighThreshold; + uint16_t FpsLowThreshold; + + uint16_t BAPMTI_R [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + uint16_t BAPMTI_RC [SMU71_DTE_ITERATIONS][SMU71_DTE_SOURCES][SMU71_DTE_SINKS]; + + uint8_t DTEAmbientTempBase; + uint8_t DTETjOffset; + uint8_t GpuTjMax; + uint8_t GpuTjHyst; + + uint16_t BootVddc; + uint16_t BootVddci; + + uint16_t BootMVdd; + uint16_t padding; + + uint32_t BAPM_TEMP_GRADIENT; + + uint32_t LowSclkInterruptThreshold; + uint32_t VddGfxReChkWait; + + uint16_t PPM_PkgPwrLimit; + uint16_t PPM_TemperatureLimit; + + uint16_t DefaultTdp; + uint16_t TargetTdp; +}; + +typedef struct SMU71_Discrete_DpmTable SMU71_Discrete_DpmTable; + +// --------------------------------------------------- AC Timing Parameters ------------------------------------------------ +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE 16 +#define SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU71_MAX_LEVELS_MEMORY + +struct SMU71_Discrete_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct 
SMU71_Discrete_MCRegisterAddress SMU71_Discrete_MCRegisterAddress; + +struct SMU71_Discrete_MCRegisterSet +{ + uint32_t value[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMU71_Discrete_MCRegisterSet SMU71_Discrete_MCRegisterSet; + +struct SMU71_Discrete_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMU71_Discrete_MCRegisterAddress address[SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE]; + SMU71_Discrete_MCRegisterSet data[SMU71_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMU71_Discrete_MCRegisters SMU71_Discrete_MCRegisters; + + +// --------------------------------------------------- Fan Table ----------------------------------------------------------- +struct SMU71_Discrete_FanTable +{ + uint16_t FdoMode; + int16_t TempMin; + int16_t TempMed; + int16_t TempMax; + int16_t Slope1; + int16_t Slope2; + int16_t FdoMin; + int16_t HystUp; + int16_t HystDown; + int16_t HystSlope; + int16_t TempRespLim; + int16_t TempCurr; + int16_t SlopeCurr; + int16_t PwmCurr; + uint32_t RefreshPeriod; + int16_t FdoMax; + uint8_t TempSrc; + int8_t Padding; +}; + +typedef struct SMU71_Discrete_FanTable SMU71_Discrete_FanTable; + +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4 +#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG) + +struct SMU71_MclkDpmScoreboard +{ + + uint32_t PercentageBusy; + + int32_t PIDError; + int32_t PIDIntegral; + int32_t PIDOutput; + + uint32_t SigmaDeltaAccum; + uint32_t SigmaDeltaOutput; + uint32_t SigmaDeltaLevel; + + uint32_t UtilizationSetpoint; + + uint8_t TdpClampMode; + uint8_t TdcClampMode; + uint8_t ThermClampMode; + uint8_t VoltageBusy; + + int8_t CurrLevel; + int8_t TargLevel; + uint8_t LevelChangeInProgress; + uint8_t UpHyst; + + uint8_t DownHyst; + uint8_t VoltageDownHyst; + uint8_t DpmEnable; + uint8_t DpmRunning; + + uint8_t DpmForce; + uint8_t DpmForceLevel; + uint8_t DisplayWatermark; + uint8_t McArbIndex; + + uint32_t MinimumPerfMclk; + + uint8_t AcpiReq; + uint8_t AcpiAck; + uint8_t MclkSwitchInProgress; + uint8_t MclkSwitchCritical; + + uint8_t TargetMclkIndex; + uint8_t TargetMvddIndex; + uint8_t MclkSwitchResult; + + uint8_t EnabledLevelsChange; + + uint16_t LevelResidencyCounters [SMU71_MAX_LEVELS_MEMORY]; + uint16_t LevelSwitchCounters [SMU71_MAX_LEVELS_MEMORY]; + + void (*TargetStateCalculator)(uint8_t); + void (*SavedTargetStateCalculator)(uint8_t); + + uint16_t AutoDpmInterval; + uint16_t AutoDpmRange; + + uint16_t MclkSwitchingTime; + uint8_t padding[2]; +}; + +typedef struct SMU71_MclkDpmScoreboard SMU71_MclkDpmScoreboard; + +struct SMU71_UlvScoreboard +{ + uint8_t EnterUlv; + uint8_t ExitUlv; + uint8_t UlvActive; + uint8_t WaitingForUlv; + uint8_t UlvEnable; + uint8_t UlvRunning; + uint8_t UlvMasterEnable; + uint8_t padding; + uint32_t UlvAbortedCount; + uint32_t UlvTimeStamp; +}; + +typedef struct SMU71_UlvScoreboard SMU71_UlvScoreboard; + +struct SMU71_VddGfxScoreboard +{ + uint8_t VddGfxEnable; + uint8_t VddGfxActive; + uint8_t padding[2]; + + uint32_t VddGfxEnteredCount; + uint32_t VddGfxAbortedCount; +}; + +typedef struct SMU71_VddGfxScoreboard SMU71_VddGfxScoreboard; + +struct SMU71_AcpiScoreboard { + uint32_t SavedInterruptMask[2]; + uint8_t LastACPIRequest; + uint8_t CgBifResp; + uint8_t RequestType; + uint8_t Padding; + SMU71_Discrete_ACPILevel D0Level; +}; + +typedef struct SMU71_AcpiScoreboard SMU71_AcpiScoreboard; + + +struct SMU71_Discrete_PmFuses { + // dw0-dw1 + uint8_t BapmVddCVidHiSidd[8]; + + // dw2-dw3 + uint8_t BapmVddCVidLoSidd[8]; + + // dw4-dw5 + uint8_t VddCVid[8]; + + // dw6 
+ uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t SviLoadLineTrimVddC; + uint8_t SviLoadLineOffsetVddC; + + // dw7 + uint16_t TDC_VDDC_PkgLimit; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + + // dw8 + uint8_t TdcWaterfallCtl; + uint8_t LPMLTemperatureMin; + uint8_t LPMLTemperatureMax; + uint8_t Reserved; + + // dw9-dw12 + uint8_t LPMLTemperatureScaler[16]; + + // dw13-dw14 + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t Reserved6; + + // dw15 + uint8_t GnbLPML[16]; + + // dw15 + uint8_t GnbLPMLMaxVid; + uint8_t GnbLPMLMinVid; + uint8_t Reserved1[2]; + + // dw16 + uint16_t BapmVddCBaseLeakageHiSidd; + uint16_t BapmVddCBaseLeakageLoSidd; +}; + +typedef struct SMU71_Discrete_PmFuses SMU71_Discrete_PmFuses; + +struct SMU71_Discrete_Log_Header_Table { + uint32_t version; + uint32_t asic_id; + uint16_t flags; + uint16_t entry_size; + uint32_t total_size; + uint32_t num_of_entries; + uint8_t type; + uint8_t mode; + uint8_t filler_0[2]; + uint32_t filler_1[2]; +}; + +typedef struct SMU71_Discrete_Log_Header_Table SMU71_Discrete_Log_Header_Table; + +struct SMU71_Discrete_Log_Cntl { + uint8_t Enabled; + uint8_t Type; + uint8_t padding[2]; + uint32_t BufferSize; + uint32_t SamplesLogged; + uint32_t SampleSize; + uint32_t AddrL; + uint32_t AddrH; +}; + +typedef struct SMU71_Discrete_Log_Cntl SMU71_Discrete_Log_Cntl; + +#if defined SMU__DGPU_ONLY + #define CAC_ACC_NW_NUM_OF_SIGNALS 83 +#endif + + +struct SMU71_Discrete_Cac_Collection_Table { + uint32_t temperature; + uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS]; + uint32_t filler[4]; +}; + +typedef struct SMU71_Discrete_Cac_Collection_Table SMU71_Discrete_Cac_Collection_Table; + +struct SMU71_Discrete_Cac_Verification_Table { + uint32_t VddcTotalPower; + uint32_t VddcLeakagePower; + uint32_t VddcConstantPower; + uint32_t VddcGfxDynamicPower; + uint32_t VddcUvdDynamicPower; + uint32_t VddcVceDynamicPower; + uint32_t VddcAcpDynamicPower; + uint32_t VddcPcieDynamicPower; + uint32_t VddcDceDynamicPower; + uint32_t VddcCurrent; + uint32_t VddcVoltage; + uint32_t VddciTotalPower; + uint32_t VddciLeakagePower; + uint32_t VddciConstantPower; + uint32_t VddciDynamicPower; + uint32_t Vddr1TotalPower; + uint32_t Vddr1LeakagePower; + uint32_t Vddr1ConstantPower; + uint32_t Vddr1DynamicPower; + uint32_t spare[8]; + uint32_t temperature; +}; + +typedef struct SMU71_Discrete_Cac_Verification_Table SMU71_Discrete_Cac_Verification_Table; + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) +#endif + + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index f10fb64ef981..19e79469f6bc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -2,7 +2,8 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. -SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o +SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o \ + polaris10_smumgr.o iceland_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c new file mode 100644 index 000000000000..f50658332d9d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -0,0 +1,713 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/gfp.h> + +#include "smumgr.h" +#include "iceland_smumgr.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "ppsmc.h" +#include "smu/smu_7_1_1_d.h" +#include "smu/smu_7_1_1_sh_mask.h" +#include "cgs_common.h" + +#define ICELAND_SMC_SIZE 0x20000 +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /*128 *1024*/ + +/** + * Set the address for reading/writing the SMC SRAM space. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + */ +static int iceland_set_smc_sram_address(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t limit) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcAddress)), + "SMC address must be 4 byte aligned.", + return -1;); + + PP_ASSERT_WITH_CODE((limit > (smcAddress + 3)), + "SMC address is beyond the SMC RAM area.", + return -1;); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, smcAddress); + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +/** + * Copy bytes from an array into the SMC RAM space. + * + * @param smumgr the address of the powerplay SMU manager. + * @param smcStartAddress the start address in the SMC RAM to copy bytes to. + * @param src the byte array to copy the bytes from. + * @param byteCount the number of bytes to copy. 
+ */ +int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, const uint8_t *src, + uint32_t byteCount, uint32_t limit) +{ + uint32_t addr; + uint32_t data, orig_data; + int result = 0; + uint32_t extra_shift; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + PP_ASSERT_WITH_CODE((0 == (3 & smcStartAddress)), + "SMC address must be 4 byte aligned.", + return 0;); + + PP_ASSERT_WITH_CODE((limit > (smcStartAddress + byteCount)), + "SMC address is beyond the SMC RAM area.", + return 0;); + + addr = smcStartAddress; + + while (byteCount >= 4) { + /* + * Bytes are written into the + * SMC address space with the MSB first + */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + + src += 4; + byteCount -= 4; + addr += 4; + } + + if (0 != byteCount) { + /* Now write odd bytes left, do a read modify write cycle */ + data = 0; + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + orig_data = cgs_read_register(smumgr->device, + mmSMC_IND_DATA_0); + extra_shift = 8 * (4 - byteCount); + + while (byteCount > 0) { + data = (data << 8) + *src++; + byteCount--; + } + + data <<= extra_shift; + data |= (orig_data & ~((~0UL) << extra_shift)); + + result = iceland_set_smc_sram_address(smumgr, addr, limit); + if (result) + goto out; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + } + +out: + return result; +} + +/** + * Deassert the reset'pin' (set it to high). + * + * @param smumgr the address of the powerplay hardware manager. + */ +static int iceland_start_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + return 0; +} + +static void iceland_pp_reset_smc(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); +} + +int iceland_program_jump_on_start(struct pp_smumgr *smumgr) +{ + static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 }; + + iceland_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1); + + return 0; +} + +/** + * Return if the SMC is currently running. + * + * @param smumgr the address of the powerplay hardware manager. + */ +bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + uint32_t val1, val2; + + val1 = SMUM_READ_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable); + val2 = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_PC_C); + + return ((0 == val1) && (0x20100 <= val2)); +} + +/** + * Send a message to the SMC, and wait for its response. + * + * @param smumgr the address of the powerplay hardware manager. + * @param msg the message to send. + * @return The response that came from the SMC. 
+ */ +static int iceland_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + if (!iceland_is_smc_ram_running(smumgr)) + return -EINVAL; + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Previous Message.", + ); + + cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); + + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); + PP_ASSERT_WITH_CODE( + 1 == SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP), + "Failed to send Message.", + ); + + return 0; +} + +/** + * Send a message to the SMC with a parameter. + * + * @param smumgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send. + * @return The response that came from the SMC. + */ +static int iceland_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter); + + return iceland_send_msg_to_smc(smumgr, msg); +} + +/* + * Read a 32bit value from the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value an output parameter for the data read from the SMC SRAM. + */ +int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t *value, + uint32_t limit) +{ + int result; + + result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_0); + + return 0; +} + +/* + * Write a 32bit value to the SMC SRAM space. + * ALL PARAMETERS ARE IN HOST BYTE ORDER. + * @param smumgr the address of the powerplay hardware manager. + * @param smcAddress the address in the SMC RAM to access. + * @param value the value to write to the SMC SRAM.
+ */ +int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, uint32_t value, + uint32_t limit) +{ + int result; + + result = iceland_set_smc_sram_address(smumgr, smcAddress, limit); + + if (0 != result) + return result; + + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, value); + + return 0; +} + +static int iceland_smu_fini(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *priv = (struct iceland_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + + if (smumgr->backend != NULL) { + kfree(smumgr->backend); + smumgr->backend = NULL; + } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); + return 0; +} + +static enum cgs_ucode_id iceland_convert_fw_type_to_cgs(uint32_t fw_type) +{ + enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; + + switch (fw_type) { + case UCODE_ID_SMU: + result = CGS_UCODE_ID_SMU; + break; + case UCODE_ID_SDMA0: + result = CGS_UCODE_ID_SDMA0; + break; + case UCODE_ID_SDMA1: + result = CGS_UCODE_ID_SDMA1; + break; + case UCODE_ID_CP_CE: + result = CGS_UCODE_ID_CP_CE; + break; + case UCODE_ID_CP_PFP: + result = CGS_UCODE_ID_CP_PFP; + break; + case UCODE_ID_CP_ME: + result = CGS_UCODE_ID_CP_ME; + break; + case UCODE_ID_CP_MEC: + result = CGS_UCODE_ID_CP_MEC; + break; + case UCODE_ID_CP_MEC_JT1: + result = CGS_UCODE_ID_CP_MEC_JT1; + break; + case UCODE_ID_CP_MEC_JT2: + result = CGS_UCODE_ID_CP_MEC_JT2; + break; + case UCODE_ID_RLC_G: + result = CGS_UCODE_ID_RLC_G; + break; + default: + break; + } + + return result; +} + +/** + * Convert the PPIRI firmware type to SMU type mask. + * For MEC, we need to check all MEC related type + */ +static uint16_t iceland_get_mask_for_firmware_type(uint16_t firmwareType) +{ + uint16_t result = 0; + + switch (firmwareType) { + case UCODE_ID_SDMA0: + result = UCODE_ID_SDMA0_MASK; + break; + case UCODE_ID_SDMA1: + result = UCODE_ID_SDMA1_MASK; + break; + case UCODE_ID_CP_CE: + result = UCODE_ID_CP_CE_MASK; + break; + case UCODE_ID_CP_PFP: + result = UCODE_ID_CP_PFP_MASK; + break; + case UCODE_ID_CP_ME: + result = UCODE_ID_CP_ME_MASK; + break; + case UCODE_ID_CP_MEC: + case UCODE_ID_CP_MEC_JT1: + case UCODE_ID_CP_MEC_JT2: + result = UCODE_ID_CP_MEC_MASK; + break; + case UCODE_ID_RLC_G: + result = UCODE_ID_RLC_G_MASK; + break; + default: + break; + } + + return result; +} + +/** + * Check if the FW has been loaded, + * SMU will not return if loading has not finished. 
+*/ +static int iceland_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fwType) +{ + uint16_t fwMask = iceland_get_mask_for_firmware_type(fwType); + + if (0 != SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND, + SOFT_REGISTERS_TABLE_27, fwMask, fwMask)) { + pr_err("[ powerplay ] check firmware loading failed\n"); + return -EINVAL; + } + + return 0; +} + +/* Populate one firmware image to the data structure */ +static int iceland_populate_single_firmware_entry(struct pp_smumgr *smumgr, + uint16_t firmware_type, + struct SMU_Entry *pentry) +{ + int result; + struct cgs_firmware_info info = {0}; + + result = cgs_get_firmware_info( + smumgr->device, + iceland_convert_fw_type_to_cgs(firmware_type), + &info); + + if (result == 0) { + pentry->version = 0; + pentry->id = (uint16_t)firmware_type; + pentry->image_addr_high = smu_upper_32_bits(info.mc_addr); + pentry->image_addr_low = smu_lower_32_bits(info.mc_addr); + pentry->meta_data_addr_high = 0; + pentry->meta_data_addr_low = 0; + pentry->data_size_byte = info.image_size; + pentry->num_register_entries = 0; + + if (firmware_type == UCODE_ID_RLC_G) + pentry->flags = 1; + else + pentry->flags = 0; + } else { + return result; + } + + return result; +} + +static void iceland_pp_stop_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 1); +} + +static void iceland_start_smc_clock(struct pp_smumgr *smumgr) +{ + SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, + ck_disable, 0); +} + +int iceland_smu_start_smc(struct pp_smumgr *smumgr) +{ + /* set smc instruct start point at 0x0 */ + iceland_program_jump_on_start(smumgr); + + /* enable smc clock */ + iceland_start_smc_clock(smumgr); + + /* de-assert reset */ + iceland_start_smc(smumgr); + + SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, + INTERRUPTS_ENABLED, 1); + + return 0; +} + +/** + * Upload the SMC firmware to the SMC microcontroller. + * + * @param smumgr the address of the powerplay hardware manager. + * @param pFirmware the data structure containing the various sections of the firmware. 
+ */ +int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr) +{ + const uint8_t *src; + uint32_t byte_count, val; + uint32_t data; + struct cgs_firmware_info info = {0}; + + if (smumgr == NULL || smumgr->device == NULL) + return -EINVAL; + + /* load SMC firmware */ + cgs_get_firmware_info(smumgr->device, + iceland_convert_fw_type_to_cgs(UCODE_ID_SMU), &info); + + if (info.image_size & 3) { + pr_err("[ powerplay ] SMC ucode is not 4 bytes aligned\n"); + return -EINVAL; + } + + if (info.image_size > ICELAND_SMC_SIZE) { + pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n"); + return -EINVAL; + } + + /* wait for smc boot up */ + SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, + RCU_UC_EVENTS, boot_seq_done, 0); + + /* clear firmware interrupt enable flag */ + val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL); + cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, + ixSMC_SYSCON_MISC_CNTL, val | 1); + + /* stop smc clock */ + iceland_pp_stop_smc_clock(smumgr); + + /* reset smc */ + iceland_pp_reset_smc(smumgr); + + cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, + info.ucode_start_address); + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 1); + + byte_count = info.image_size; + src = (const uint8_t *)info.kptr; + + while (byte_count >= 4) { + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data); + src += 4; + byte_count -= 4; + } + + SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, + AUTO_INCREMENT_IND_0, 0); + + return 0; +} + +static int iceland_request_smu_reload_fw(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = + (struct iceland_smumgr *)(smumgr->backend); + uint16_t fw_to_load; + int result = 0; + struct SMU_DRAMData_TOC *toc; + + toc = (struct SMU_DRAMData_TOC *)iceland_smu->pHeader; + toc->num_entries = 0; + toc->structure_version = 1; + + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_RLC_G, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry(smumgr, + UCODE_ID_CP_CE, + &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", + return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), + "Failed to Get Firmware Entry.\n", return -1); + PP_ASSERT_WITH_CODE( + 0 == iceland_populate_single_firmware_entry + (smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), 
+ "Failed to Get Firmware Entry.\n", return -1); + + if (!iceland_is_smc_ram_running(smumgr)) { + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + } + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_HI, + iceland_smu->header_buffer.mc_addr_high); + + iceland_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_DRV_DRAM_ADDR_LO, + iceland_smu->header_buffer.mc_addr_low); + + fw_to_load = UCODE_ID_RLC_G_MASK + + UCODE_ID_SDMA0_MASK + + UCODE_ID_SDMA1_MASK + + UCODE_ID_CP_CE_MASK + + UCODE_ID_CP_ME_MASK + + UCODE_ID_CP_PFP_MASK + + UCODE_ID_CP_MEC_MASK + + UCODE_ID_CP_MEC_JT1_MASK + + UCODE_ID_CP_MEC_JT2_MASK; + + PP_ASSERT_WITH_CODE( + 0 == iceland_send_msg_to_smc_with_parameter( + smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), + "Fail to Request SMU Load uCode", return 0); + + return result; +} + +static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr, + uint32_t firmwareType) +{ + return 0; +} + +static int iceland_start_smu(struct pp_smumgr *smumgr) +{ + int result; + + result = iceland_smu_upload_firmware_image(smumgr); + if (result) + return result; + + result = iceland_smu_start_smc(smumgr); + if (result) + return result; + + result = iceland_request_smu_reload_fw(smumgr); + + return result; +} + +/** + * Initialize the Iceland SMU manager backend. + * Sizes and allocates the firmware header (TOC) buffer in GPU-visible memory + * and records its CPU and MC addresses for later use. + * + * @param smumgr the address of the powerplay hardware manager. + */ +static int iceland_smu_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu; + uint64_t mc_addr = 0; + + /* Size the firmware header (TOC) buffer; the backend private data itself was allocated in iceland_smum_init() */ + iceland_smu = (struct iceland_smumgr *)(smumgr->backend); + iceland_smu->header_buffer.data_size = + ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; + + smu_allocate_memory(smumgr->device, + iceland_smu->header_buffer.data_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &iceland_smu->header_buffer.kaddr, + &iceland_smu->header_buffer.handle); + + iceland_smu->pHeader = iceland_smu->header_buffer.kaddr; + iceland_smu->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr); + iceland_smu->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr); + + PP_ASSERT_WITH_CODE((NULL != iceland_smu->pHeader), + "Out of memory.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)iceland_smu->header_buffer.handle); + return -1); + + return 0; +} + +static const struct pp_smumgr_func iceland_smu_funcs = { + .smu_init = &iceland_smu_init, + .smu_fini = &iceland_smu_fini, + .start_smu = &iceland_start_smu, + .check_fw_load_finish = &iceland_check_fw_load_finish, + .request_smu_load_fw = &iceland_request_smu_reload_fw, + .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, + .send_msg_to_smc = &iceland_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &iceland_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; + +int iceland_smum_init(struct pp_smumgr *smumgr) +{ + struct iceland_smumgr *iceland_smu = NULL; + + iceland_smu = kzalloc(sizeof(struct iceland_smumgr), GFP_KERNEL); + + if (iceland_smu == NULL) + return -ENOMEM; + + smumgr->backend = iceland_smu; + smumgr->smumgr_funcs = &iceland_smu_funcs; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h new file mode 100644 index 000000000000..62009a7ae827 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h @@ -0,0 +1,64 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Author: Huang Rui <ray.huang@amd.com> + * + */ + +#ifndef _ICELAND_SMUMGR_H_ +#define _ICELAND_SMUMGR_H_ + +struct iceland_buffer_entry { + uint32_t data_size; + uint32_t mc_addr_low; + uint32_t mc_addr_high; + void *kaddr; + unsigned long handle; +}; + +/* Iceland only has header_buffer, don't have smu buffer. */ +struct iceland_smumgr { + uint8_t *pHeader; + uint8_t *pMecImage; + uint32_t ulSoftRegsStart; + + struct iceland_buffer_entry header_buffer; +}; + +extern int iceland_smum_init(struct pp_smumgr *smumgr); +extern int iceland_copy_bytes_to_smc(struct pp_smumgr *smumgr, + uint32_t smcStartAddress, + const uint8_t *src, + uint32_t byteCount, uint32_t limit); + +extern int iceland_smu_start_smc(struct pp_smumgr *smumgr); + +extern int iceland_read_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t *value, uint32_t limit); +extern int iceland_write_smc_sram_dword(struct pp_smumgr *smumgr, + uint32_t smcAddress, + uint32_t value, uint32_t limit); + +extern bool iceland_is_smc_ram_running(struct pp_smumgr *smumgr); +extern int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5dba7c509710..704ff4cc0023 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -978,7 +978,7 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) return 0; } -static const struct pp_smumgr_func ellsemere_smu_funcs = { +static const struct pp_smumgr_func polaris10_smu_funcs = { .smu_init = polaris10_smu_init, .smu_fini = polaris10_smu_fini, .start_smu = polaris10_start_smu, @@ -1001,7 +1001,7 @@ int polaris10_smum_init(struct pp_smumgr *smumgr) return -1; smumgr->backend = polaris10_smu; - smumgr->smumgr_funcs = &ellsemere_smu_funcs; + smumgr->smumgr_funcs = &polaris10_smu_funcs; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 7723473e51a0..cf3cabee8918 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -30,6 +30,7 @@ #include "linux/delay.h" #include "cz_smumgr.h" #include "tonga_smumgr.h" +#include "iceland_smumgr.h" #include "fiji_smumgr.h" #include "polaris10_smumgr.h" @@ -58,6 +59,9 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) break; case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { + case CHIP_TOPAZ: + iceland_smum_init(smumgr); + break; case CHIP_TONGA: tonga_smum_init(smumgr); break; diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index ee0a61c2861b..7130b044b004 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -183,8 +183,6 @@ static void arc_pgu_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = { - .prepare_fb = NULL, - .cleanup_fb = NULL, .atomic_update = arc_pgu_plane_atomic_update, }; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 82171d223f2d..c383d724527f 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -91,7 +91,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(drm, state); drm_atomic_helper_commit_modeset_enables(drm, state); - drm_atomic_helper_commit_planes(drm, state, true); + drm_atomic_helper_commit_planes(drm, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); malidp_atomic_commit_hw_done(state); diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index 95558fde214b..271d2fb9711c 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -49,6 +49,6 @@ void malidp_de_planes_destroy(struct drm_device *drm); int malidp_crtc_init(struct drm_device *drm); /* often used combination of rotational bits */ -#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)) +#define MALIDP_ROTATED_MASK (DRM_ROTATE_90 | DRM_ROTATE_270) #endif /* __MALIDP_DRV_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 725098d6179a..82c193e5e0d6 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -108,7 +108,7 @@ static int malidp_de_plane_check(struct drm_plane *plane, return -EINVAL; /* packed RGB888 / BGR888 can't be rotated or flipped */ - if (state->rotation != BIT(DRM_ROTATE_0) && + if (state->rotation != DRM_ROTATE_0 && (state->fb->pixel_format == DRM_FORMAT_RGB888 || state->fb->pixel_format == DRM_FORMAT_BGR888)) return -EINVAL; @@ -188,9 +188,9 @@ static void malidp_de_plane_update(struct drm_plane *plane, /* setup the rotation and axis flip bits */ if (plane->state->rotation & DRM_ROTATE_MASK) val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET; - if (plane->state->rotation & BIT(DRM_REFLECT_X)) + if (plane->state->rotation & DRM_REFLECT_X) val |= LAYER_V_FLIP; - if (plane->state->rotation & BIT(DRM_REFLECT_Y)) + if (plane->state->rotation & DRM_REFLECT_Y) val |= LAYER_H_FLIP; /* set the 'enable layer' bit */ @@ -255,12 +255,12 @@ int malidp_de_planes_init(struct drm_device *drm) goto cleanup; if (!drm->mode_config.rotation_property) { - unsigned long flags = BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_90) | - BIT(DRM_ROTATE_180) | - BIT(DRM_ROTATE_270) | - BIT(DRM_REFLECT_X) | - BIT(DRM_REFLECT_Y); + unsigned long flags = DRM_ROTATE_0 | + DRM_ROTATE_90 | + DRM_ROTATE_180 | + DRM_ROTATE_270 | + DRM_REFLECT_X | + DRM_REFLECT_Y; 
drm->mode_config.rotation_property = drm_mode_create_rotation_property(drm, flags); } @@ -268,7 +268,7 @@ int malidp_de_planes_init(struct drm_device *drm) if (drm->mode_config.rotation_property && (id != DE_SMART)) drm_object_attach_property(&plane->base.base, drm->mode_config.rotation_property, - BIT(DRM_ROTATE_0)); + DRM_ROTATE_0); drm_plane_helper_add(&plane->base, &malidp_de_plane_helper_funcs); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index f5ebdd681445..1e0e68f608e4 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -211,7 +211,7 @@ static struct drm_driver armada_drm_driver = { .desc = "Armada SoC DRM", .date = "20120730", .driver_features = DRIVER_GEM | DRIVER_MODESET | - DRIVER_HAVE_IRQ | DRIVER_PRIME, + DRIVER_PRIME, .ioctls = armada_ioctls, .fops = &armada_drm_fops, }; diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 7d03c51abcb9..ca73ad8614fe 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -7,7 +7,6 @@ * published by the Free Software Foundation. */ #include <linux/errno.h> -#include <linux/fb.h> #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 1ee707ef6b8d..152b4e716269 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -121,7 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, int ret; ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, - BIT(DRM_ROTATE_0), + DRM_ROTATE_0, 0, INT_MAX, true, false, &visible); if (ret) return ret; diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index c017a9330a18..7a86e24e2687 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -33,7 +33,6 @@ #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/init.h> diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index d4a3d61b7b06..8e7483d90c47 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -457,7 +457,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit) /* Apply the atomic update. 
*/ drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, false); + drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); drm_atomic_helper_wait_for_vblanks(dev, old_state); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 016c191221f3..72e6b7dd457b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 || state->base.fb->pixel_format == DRM_FORMAT_NV61) && - (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)))) + (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270))) cfg |= ATMEL_HLCDC_YUV422ROT; atmel_hlcdc_layer_update_cfg(&plane->layer, @@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, /* * Swap width and size in case of 90 or 270 degrees rotation */ - if (state->base.rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { + if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) { tmp = state->crtc_w; state->crtc_w = state->crtc_h; state->crtc_h = tmp; @@ -677,7 +677,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, return -EINVAL; switch (state->base.rotation & DRM_ROTATE_MASK) { - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: offset = ((y_offset + state->src_y + patched_src_w - 1) / ydiv) * fb->pitches[i]; offset += ((x_offset + state->src_x) / xdiv) * @@ -686,7 +686,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, fb->pitches[i]; state->pstride[i] = -fb->pitches[i] - state->bpp[i]; break; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: offset = ((y_offset + state->src_y + patched_src_h - 1) / ydiv) * fb->pitches[i]; offset += ((x_offset + state->src_x + patched_src_w - 1) / @@ -695,7 +695,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, state->bpp[i]) - fb->pitches[i]; state->pstride[i] = -2 * state->bpp[i]; break; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: offset = ((y_offset + state->src_y) / ydiv) * fb->pitches[i]; offset += ((x_offset + state->src_x + patched_src_h - 1) / @@ -705,7 +705,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, (2 * state->bpp[i]); state->pstride[i] = fb->pitches[i] - state->bpp[i]; break; - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: default: offset = ((y_offset + state->src_y) / ydiv) * fb->pitches[i]; @@ -755,7 +755,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, } static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { /* * FIXME: we should avoid this const -> non-const cast but it's @@ -780,7 +780,7 @@ static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p, } static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { /* * FIXME: we should avoid this const -> non-const cast but it's @@ -905,7 +905,7 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane, if (desc->layout.xstride && desc->layout.pstride) drm_object_attach_property(&plane->base.base, plane->base.dev->mode_config.rotation_property, - BIT(DRM_ROTATE_0)); + DRM_ROTATE_0); if (desc->layout.csc) { /* @@ -1056,10 +1056,10 @@ 
atmel_hlcdc_plane_create_properties(struct drm_device *dev) dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, - BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_90) | - BIT(DRM_ROTATE_180) | - BIT(DRM_ROTATE_270)); + DRM_ROTATE_0 | + DRM_ROTATE_90 | + DRM_ROTATE_180 | + DRM_ROTATE_270); if (!dev->mode_config.rotation_property) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 19b5adaebe24..32dfe418cc98 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -1,5 +1,4 @@ #include <linux/io.h> -#include <linux/fb.h> #include <linux/console.h> #include <drm/drmP.h> diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index abace82de6ea..277654abe0f7 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -8,6 +8,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/slab.h> +#include <drm/drm_fb_helper.h> #include "bochs.h" @@ -153,7 +154,7 @@ static int bochs_kick_out_firmware_fb(struct pci_dev *pdev) ap->ranges[0].base = pci_resource_start(pdev, 0); ap->ranges[0].size = pci_resource_len(pdev, 0); - remove_conflicting_framebuffers(ap, "bochsdrmfb", false); + drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false); kfree(ap); return 0; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 32715daf73cb..efac8aba6776 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -97,6 +97,83 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) return 0; } +int analogix_dp_enable_psr(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + struct edp_vsc_psr psr_vsc; + + if (!dp->psr_support) + return -EINVAL; + + /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + + psr_vsc.DB0 = 0; + psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID; + + analogix_dp_send_psr_spd(dp, &psr_vsc); + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_enable_psr); + +int analogix_dp_disable_psr(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + struct edp_vsc_psr psr_vsc; + + if (!dp->psr_support) + return -EINVAL; + + /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + + psr_vsc.DB0 = 0; + psr_vsc.DB1 = 0; + + analogix_dp_send_psr_spd(dp, &psr_vsc); + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_disable_psr); + +static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp) +{ + unsigned char psr_version; + + analogix_dp_read_byte_from_dpcd(dp, DP_PSR_SUPPORT, &psr_version); + dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version); + + return (psr_version & DP_PSR_IS_SUPPORTED) ? 
true : false; +} + +static void analogix_dp_enable_sink_psr(struct analogix_dp_device *dp) +{ + unsigned char psr_en; + + /* Disable psr function */ + analogix_dp_read_byte_from_dpcd(dp, DP_PSR_EN_CFG, &psr_en); + psr_en &= ~DP_PSR_ENABLE; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + /* Main-Link transmitter remains active during PSR active states */ + psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + /* Enable psr function */ + psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE | + DP_PSR_CRC_VERIFICATION; + analogix_dp_write_byte_to_dpcd(dp, DP_PSR_EN_CFG, psr_en); + + analogix_dp_enable_psr_crc(dp); +} + static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data) { int i; @@ -921,13 +998,69 @@ static void analogix_dp_commit(struct analogix_dp_device *dp) /* Enable video */ analogix_dp_start_video(dp); + + dp->psr_support = analogix_dp_detect_sink_psr(dp); + if (dp->psr_support) + analogix_dp_enable_sink_psr(dp); +} + +/* + * This function is a bit of a catch-all for panel preparation, hopefully + * simplifying the logic of functions that need to prepare/unprepare the panel + * below. + * + * If @prepare is true, this function will prepare the panel. Conversely, if it + * is false, the panel will be unprepared. + * + * If @is_modeset_prepare is true, the function will disregard the current state + * of the panel and either prepare/unprepare the panel based on @prepare. Once + * it finishes, it will update dp->panel_is_modeset to reflect the current state + * of the panel. + */ +static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, + bool prepare, bool is_modeset_prepare) +{ + int ret = 0; + + if (!dp->plat_data->panel) + return 0; + + mutex_lock(&dp->panel_lock); + + /* + * Exit early if this is a temporary prepare/unprepare and we're already + * modeset (since we neither want to prepare twice or unprepare early). 
+ */ + if (dp->panel_is_modeset && !is_modeset_prepare) + goto out; + + if (prepare) + ret = drm_panel_prepare(dp->plat_data->panel); + else + ret = drm_panel_unprepare(dp->plat_data->panel); + + if (ret) + goto out; + + if (is_modeset_prepare) + dp->panel_is_modeset = prepare; + +out: + mutex_unlock(&dp->panel_lock); + return ret; } int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); struct edid *edid = (struct edid *)dp->edid; - int num_modes = 0; + int ret, num_modes = 0; + + ret = analogix_dp_prepare_panel(dp, true, false); + if (ret) { + DRM_ERROR("Failed to prepare panel (%d)\n", ret); + return 0; + } if (analogix_dp_handle_edid(dp) == 0) { drm_mode_connector_update_edid_property(&dp->connector, edid); @@ -940,6 +1073,10 @@ int analogix_dp_get_modes(struct drm_connector *connector) if (dp->plat_data->get_modes) num_modes += dp->plat_data->get_modes(dp->plat_data, connector); + ret = analogix_dp_prepare_panel(dp, false, false); + if (ret) + DRM_ERROR("Failed to unprepare panel (%d)\n", ret); + return num_modes; } @@ -960,11 +1097,23 @@ enum drm_connector_status analogix_dp_detect(struct drm_connector *connector, bool force) { struct analogix_dp_device *dp = to_dp(connector); + enum drm_connector_status status = connector_status_disconnected; + int ret; - if (analogix_dp_detect_hpd(dp)) + ret = analogix_dp_prepare_panel(dp, true, false); + if (ret) { + DRM_ERROR("Failed to prepare panel (%d)\n", ret); return connector_status_disconnected; + } + + if (!analogix_dp_detect_hpd(dp)) + status = connector_status_connected; - return connector_status_connected; + ret = analogix_dp_prepare_panel(dp, false, false); + if (ret) + DRM_ERROR("Failed to unprepare panel (%d)\n", ret); + + return status; } static void analogix_dp_connector_destroy(struct drm_connector *connector) @@ -1035,6 +1184,16 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge) return 0; } +static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge) +{ + struct analogix_dp_device *dp = bridge->driver_private; + int ret; + + ret = analogix_dp_prepare_panel(dp, true, true); + if (ret) + DRM_ERROR("failed to setup the panel ret = %d\n", ret); +} + static void analogix_dp_bridge_enable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; @@ -1058,6 +1217,7 @@ static void analogix_dp_bridge_enable(struct drm_bridge *bridge) static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; + int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1077,6 +1237,10 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) pm_runtime_put_sync(dp->dev); + ret = analogix_dp_prepare_panel(dp, false, true); + if (ret) + DRM_ERROR("failed to setup the panel ret = %d\n", ret); + dp->dpms_mode = DRM_MODE_DPMS_OFF; } @@ -1165,9 +1329,9 @@ static void analogix_dp_bridge_nop(struct drm_bridge *bridge) } static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { + .pre_enable = analogix_dp_bridge_pre_enable, .enable = analogix_dp_bridge_enable, .disable = analogix_dp_bridge_disable, - .pre_enable = analogix_dp_bridge_nop, .post_disable = analogix_dp_bridge_nop, .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, @@ -1254,6 +1418,9 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; + mutex_init(&dp->panel_lock); + dp->panel_is_modeset = false; + /* * 
platform dp driver need containor_of the plat_data to get * the driver private data, so we need to store the point of @@ -1333,13 +1500,6 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, phy_power_on(dp->phy); - if (dp->plat_data->panel) { - if (drm_panel_prepare(dp->plat_data->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return -EBUSY; - } - } - analogix_dp_init_dp(dp); ret = devm_request_threaded_irq(&pdev->dev, dp->irq, diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index b45638043ec4..473b9802b2d6 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -177,6 +177,10 @@ struct analogix_dp_device { int hpd_gpio; bool force_hpd; unsigned char edid[EDID_BLOCK_LENGTH * 2]; + bool psr_support; + + struct mutex panel_lock; + bool panel_is_modeset; struct analogix_dp_plat_data *plat_data; }; @@ -278,4 +282,8 @@ int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp); void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp); void analogix_dp_enable_scrambling(struct analogix_dp_device *dp); void analogix_dp_disable_scrambling(struct analogix_dp_device *dp); +void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp); +void analogix_dp_send_psr_spd(struct analogix_dp_device *dp, + struct edp_vsc_psr *vsc); + #endif /* _ANALOGIX_DP_CORE_H */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 48030f0cf497..52c1b6b2982e 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1322,3 +1322,54 @@ void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) reg |= SCRAMBLING_DISABLE; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); } + +void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp) +{ + writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON); +} + +void analogix_dp_send_psr_spd(struct analogix_dp_device *dp, + struct edp_vsc_psr *vsc) +{ + unsigned int val; + + /* don't send info frame */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val &= ~IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + + /* configure single frame update mode */ + writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE, + dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL); + + /* configure VSC HB0~HB3 */ + writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0); + writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1); + writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2); + writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3); + + /* configure reused VSC PB0~PB3, magic number from vendor */ + writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0); + writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1); + writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2); + writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3); + + /* configure DB0 / DB1 values */ + writel(vsc->DB0, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0); + writel(vsc->DB1, dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1); + + /* set reuse spd inforframe */ + val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + val |= REUSE_SPD_EN; + writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + + /* mark info frame update */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val = (val | IF_UP) & ~IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + + 
/* send info frame */ + val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + val |= IF_EN; + writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); +} diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index cdcc6c5add5e..40200c652533 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -22,6 +22,8 @@ #define ANALOGIX_DP_VIDEO_CTL_8 0x3C #define ANALOGIX_DP_VIDEO_CTL_10 0x44 +#define ANALOGIX_DP_SPDIF_AUDIO_CTL_0 0xD8 + #define ANALOGIX_DP_PLL_REG_1 0xfc #define ANALOGIX_DP_PLL_REG_2 0x9e4 #define ANALOGIX_DP_PLL_REG_3 0x9e8 @@ -30,6 +32,21 @@ #define ANALOGIX_DP_PD 0x12c +#define ANALOGIX_DP_IF_TYPE 0x244 +#define ANALOGIX_DP_IF_PKT_DB1 0x254 +#define ANALOGIX_DP_IF_PKT_DB2 0x258 +#define ANALOGIX_DP_SPD_HB0 0x2F8 +#define ANALOGIX_DP_SPD_HB1 0x2FC +#define ANALOGIX_DP_SPD_HB2 0x300 +#define ANALOGIX_DP_SPD_HB3 0x304 +#define ANALOGIX_DP_SPD_PB0 0x308 +#define ANALOGIX_DP_SPD_PB1 0x30C +#define ANALOGIX_DP_SPD_PB2 0x310 +#define ANALOGIX_DP_SPD_PB3 0x314 +#define ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL 0x318 +#define ANALOGIX_DP_VSC_SHADOW_DB0 0x31C +#define ANALOGIX_DP_VSC_SHADOW_DB1 0x320 + #define ANALOGIX_DP_LANE_MAP 0x35C #define ANALOGIX_DP_ANALOG_CTL_1 0x370 @@ -103,6 +120,8 @@ #define ANALOGIX_DP_SOC_GENERAL_CTL 0x800 +#define ANALOGIX_DP_CRC_CON 0x890 + /* ANALOGIX_DP_TX_SW_RESET */ #define RESET_DP_TX (0x1 << 0) @@ -151,6 +170,7 @@ #define VID_CHK_UPDATE_TYPE_SHIFT (4) #define VID_CHK_UPDATE_TYPE_1 (0x1 << 4) #define VID_CHK_UPDATE_TYPE_0 (0x0 << 4) +#define REUSE_SPD_EN (0x1 << 3) /* ANALOGIX_DP_VIDEO_CTL_8 */ #define VID_HRES_TH(x) (((x) & 0xf) << 4) @@ -167,6 +187,12 @@ #define REF_CLK_27M (0x0 << 0) #define REF_CLK_MASK (0x1 << 0) +/* ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL */ +#define PSR_FRAME_UP_TYPE_BURST (0x1 << 0) +#define PSR_FRAME_UP_TYPE_SINGLE (0x0 << 0) +#define PSR_CRC_SEL_HARDWARE (0x1 << 1) +#define PSR_CRC_SEL_MANUALLY (0x0 << 1) + /* ANALOGIX_DP_LANE_MAP */ #define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6) #define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6) @@ -376,4 +402,12 @@ #define VIDEO_MODE_SLAVE_MODE (0x1 << 0) #define VIDEO_MODE_MASTER_MODE (0x0 << 0) +/* ANALOGIX_DP_PKT_SEND_CTL */ +#define IF_UP (0x1 << 4) +#define IF_EN (0x1 << 0) + +/* ANALOGIX_DP_CRC_CON */ +#define PSR_VID_CRC_FLUSH (0x1 << 2) +#define PSR_VID_CRC_ENABLE (0x1 << 0) + #endif /* _ANALOGIX_DP_REG_H */ diff --git a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c index 122bb015f4a9..8f2d1379c880 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/dw-hdmi-ahb-audio.c @@ -640,7 +640,6 @@ static struct platform_driver snd_dw_hdmi_driver = { .remove = snd_dw_hdmi_remove, .driver = { .name = DRIVER_NAME, - .owner = THIS_MODULE, .pm = PM_OPS, }, }; diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index 77ab47341658..cdf39aa3943c 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -940,10 +940,11 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode) */ /* - * AVI data byte 1 differences: Colorspace in bits 4,5 rather than 5,6, - * active aspect present in bit 6 rather than 4. + * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6, + * scan info in bits 4,5 rather than 0,1 and active aspect present in + * bit 6 rather than 4. 
*/ - val = (frame.colorspace & 3) << 4 | (frame.scan_mode & 0x3); + val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3); if (frame.active_aspect & 15) val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT; if (frame.top_bar || frame.bottom_bar) diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 583b8ce614e3..f1b39a2645cc 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -16,7 +16,6 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/err.h> -#include <linux/fb.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index b05f7eae32ce..6c76d125995b 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -57,7 +57,7 @@ static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev) #ifdef CONFIG_X86 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); kfree(ap); return 0; diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 3b5be7272357..daecf1ad76a4 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -13,8 +13,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> -#include <linux/fb.h> - #include "cirrus_drv.h" static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 80446e2d3ab6..76bcb43e7c06 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -185,14 +185,23 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags) goto out; } + /* + * cirrus_modeset_init() is initializing/registering the emulated fbdev + * and DRM internals can access/test some of the fields in + * mode_config->funcs as part of the fbdev registration process. + * Make sure dev->mode_config.funcs is properly set to avoid + * dereferencing a NULL pointer. + * FIXME: mode_config.funcs assignment should probably be done in + * cirrus_modeset_init() (that's a common pattern seen in other DRM + * drivers). + */ + dev->mode_config.funcs = &cirrus_mode_funcs; r = cirrus_modeset_init(cdev); if (r) { dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); goto out; } - dev->mode_config.funcs = (void *)&cirrus_mode_funcs; - return 0; out: cirrus_driver_unload(dev); diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 605bd243fb36..d621c8a4cf00 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -430,9 +430,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) * intact so it can still be used. It is safe to call this if AGP is disabled or * was already removed. * - * If DRIVER_MODESET is active, nothing is done to protect the modesetting - * resources from getting destroyed. Drivers are responsible of cleaning them up - * during device shutdown. + * Cleanup is only done for drivers who have DRIVER_LEGACY set. 
*/ void drm_legacy_agp_clear(struct drm_device *dev) { @@ -440,7 +438,7 @@ void drm_legacy_agp_clear(struct drm_device *dev) if (!dev->agp) return; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return; list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index fa3930757972..a5126e5c05ee 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -837,8 +837,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane, /* Check whether this plane supports the fb pixel format. */ ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format); if (ret) { - DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", - drm_get_format_name(state->fb->pixel_format)); + char *format_name = drm_get_format_name(state->fb->pixel_format); + DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name); + kfree(format_name); return ret; } @@ -1608,7 +1609,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_crtc_state *crtc_state; unsigned plane_mask; int ret = 0; - unsigned int i, j; + unsigned int i, j, k; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) @@ -1690,6 +1691,15 @@ retry: goto out; } + for (k = 0; k < obj->properties->count; k++) + if (obj->properties->properties[k]->base.id == prop_id) + break; + + if (k == obj->properties->count) { + ret = -EINVAL; + goto out; + } + prop = drm_property_find(dev, prop_id); if (!prop) { drm_mode_object_unreference(obj); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 20be86d89a20..6fdd7ba47896 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -594,7 +594,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_plane_state *plane_state; int i, ret = 0; - ret = drm_atomic_helper_normalize_zpos(dev, state); + ret = drm_atomic_normalize_zpos(dev, state); if (ret) return ret; @@ -749,6 +749,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) /* Right function depends upon target state. */ if (crtc->state->enable && funcs->prepare) funcs->prepare(crtc); + else if (funcs->atomic_disable) + funcs->atomic_disable(crtc, old_crtc_state); else if (funcs->disable) funcs->disable(crtc); else @@ -886,8 +888,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) * Each encoder has at most one connector (since we always steal * it away), so we won't call mode_set hooks twice. */ - if (funcs && funcs->mode_set) + if (funcs && funcs->atomic_mode_set) { + funcs->atomic_mode_set(encoder, new_crtc_state, + connector->state); + } else if (funcs && funcs->mode_set) { funcs->mode_set(encoder, mode, adjusted_mode); + } drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode); } @@ -1142,7 +1148,8 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * * drm_atomic_helper_commit_modeset_enables(dev, state); * - * drm_atomic_helper_commit_planes(dev, state, true); + * drm_atomic_helper_commit_planes(dev, state, + * DRM_PLANE_COMMIT_ACTIVE_ONLY); * * for committing the atomic update to hardware. See the kerneldoc entries for * these three functions for more details. 
@@ -1153,7 +1160,7 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(dev, state); - drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); @@ -1631,6 +1638,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, funcs = plane->helper_private; + if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) + continue; + if (funcs->prepare_fb) { ret = funcs->prepare_fb(plane, plane_state); if (ret) @@ -1647,11 +1657,13 @@ fail: if (j >= i) continue; + if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) + continue; + funcs = plane->helper_private; if (funcs->cleanup_fb) funcs->cleanup_fb(plane, plane_state); - } return ret; @@ -1667,7 +1679,7 @@ bool plane_crtc_active(struct drm_plane_state *state) * drm_atomic_helper_commit_planes - commit plane state * @dev: DRM device * @old_state: atomic state object with old state structures - * @active_only: Only commit on active CRTC if set + * @flags: flags for committing plane state * * This function commits the new plane state using the plane and atomic helper * functions for planes and crtcs. It assumes that the atomic state has already @@ -1687,25 +1699,34 @@ bool plane_crtc_active(struct drm_plane_state *state) * most drivers don't need to be immediately notified of plane updates for a * disabled CRTC. * - * Unless otherwise needed, drivers are advised to set the @active_only - * parameters to true in order not to receive plane update notifications related - * to a disabled CRTC. This avoids the need to manually ignore plane updates in + * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in + * @flags in order not to receive plane update notifications related to a + * disabled CRTC. This avoids the need to manually ignore plane updates in * driver code when the driver and/or hardware can't or just don't need to deal * with updates on disabled CRTCs, for example when supporting runtime PM. * - * The drm_atomic_helper_commit() default implementation only sets @active_only - * to false to most closely match the behaviour of the legacy helpers. This should - * not be copied blindly by drivers. + * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant + * display controllers require to disable a CRTC's planes when the CRTC is + * disabled. This function would skip the ->atomic_disable call for a plane if + * the CRTC of the old plane state needs a modesetting operation. Of course, + * the drivers need to disable the planes in their CRTC disable callbacks + * since no one else would do that. + * + * The drm_atomic_helper_commit() default implementation doesn't set the + * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers. + * This should not be copied blindly by drivers. 
*/ void drm_atomic_helper_commit_planes(struct drm_device *dev, struct drm_atomic_state *old_state, - bool active_only) + uint32_t flags) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; struct drm_plane *plane; struct drm_plane_state *old_plane_state; int i; + bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY; + bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET; for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; @@ -1749,10 +1770,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, /* * Special-case disabling the plane if drivers support it. */ - if (disabling && funcs->atomic_disable) + if (disabling && funcs->atomic_disable) { + struct drm_crtc_state *crtc_state; + + crtc_state = old_plane_state->crtc->state; + + if (drm_atomic_crtc_needs_modeset(crtc_state) && + no_disable) + continue; + funcs->atomic_disable(plane, old_plane_state); - else if (plane->state->crtc || disabling) + } else if (plane->state->crtc || disabling) { funcs->atomic_update(plane, old_plane_state); + } } for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { @@ -1831,12 +1861,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); /** * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes - * @crtc: CRTC + * @old_crtc_state: atomic state object with the old CRTC state * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks * * Disables all planes associated with the given CRTC. This can be - * used for instance in the CRTC helper disable callback to disable - * all planes before shutting down the display pipeline. + * used for instance in the CRTC helper atomic_disable callback to disable + * all planes. * * If the atomic-parameter is set the function calls the CRTC's * atomic_begin hook before and atomic_flush hook after disabling the @@ -1845,9 +1875,11 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); * It is a bug to call this function without having implemented the * ->atomic_disable() plane hook. 
*/ -void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, - bool atomic) +void +drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, + bool atomic) { + struct drm_crtc *crtc = old_crtc_state->crtc; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_plane *plane; @@ -1855,11 +1887,11 @@ void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, if (atomic && crtc_funcs && crtc_funcs->atomic_begin) crtc_funcs->atomic_begin(crtc, NULL); - drm_for_each_plane(plane, crtc->dev) { + drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { const struct drm_plane_helper_funcs *plane_funcs = plane->helper_private; - if (plane->state->crtc != crtc || !plane_funcs) + if (!plane_funcs) continue; WARN_ON(!plane_funcs->atomic_disable); @@ -1894,6 +1926,9 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, for_each_plane_in_state(old_state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; + if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc)) + continue; + funcs = plane->helper_private; if (funcs->cleanup_fb) @@ -2354,7 +2389,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, primary_state->crtc_h = vdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; - if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { + if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) { primary_state->src_w = vdisplay << 16; primary_state->src_h = hdisplay << 16; } else { @@ -3039,7 +3074,7 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane) if (plane->state) { plane->state->plane = plane; - plane->state->rotation = BIT(DRM_ROTATE_0); + plane->state->rotation = DRM_ROTATE_0; } } EXPORT_SYMBOL(drm_atomic_helper_plane_reset); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 4153e8a193af..6b143514a566 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -251,7 +251,7 @@ void drm_master_release(struct drm_file *file_priv) if (!drm_is_current_master(file_priv)) goto out; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + if (drm_core_check_feature(dev, DRIVER_LEGACY)) { /* * Since the master is disappearing, so is the * possibility to lock. 
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index f3c0942bd756..0813b7e021be 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -193,8 +193,7 @@ done: } /** - * drm_atomic_helper_normalize_zpos - calculate normalized zpos values for all - * crtcs + * drm_atomic_normalize_zpos - calculate normalized zpos values for all crtcs * @dev: DRM device * @state: atomic state of DRM device * @@ -205,8 +204,8 @@ done: * RETURNS * Zero for success or -errno */ -int drm_atomic_helper_normalize_zpos(struct drm_device *dev, - struct drm_atomic_state *state) +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -236,3 +235,4 @@ int drm_atomic_helper_normalize_zpos(struct drm_device *dev, } return 0; } +EXPORT_SYMBOL(drm_atomic_normalize_zpos); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 255543086590..484046664d6c 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -98,11 +98,11 @@ EXPORT_SYMBOL(drm_bridge_remove); * @dev: DRM device * @bridge: bridge control structure * - * called by a kms driver to link one of our encoder/bridge to the given + * Called by a kms driver to link one of our encoder/bridge to the given * bridge. * * Note that setting up links between the bridge and our encoder/bridge - * objects needs to be handled by the kms driver itself + * objects needs to be handled by the kms driver itself. * * RETURNS: * Zero on success, error code on failure @@ -125,6 +125,31 @@ int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_attach); /** + * drm_bridge_detach - deassociate given bridge from its DRM device + * + * @bridge: bridge control structure + * + * Called by a kms driver to unlink the given bridge from its DRM device. + * + * Note that tearing down links between the bridge and our encoder/bridge + * objects needs to be handled by the kms driver itself. + */ +void drm_bridge_detach(struct drm_bridge *bridge) +{ + if (WARN_ON(!bridge)) + return; + + if (WARN_ON(!bridge->dev)) + return; + + if (bridge->funcs->detach) + bridge->funcs->detach(bridge); + + bridge->dev = NULL; +} +EXPORT_SYMBOL(drm_bridge_detach); + +/** * DOC: bridge callbacks * * The &drm_bridge_funcs ops are populated by the bridge driver. 
The DRM diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index c3a12cd8bd0d..32191513e82d 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -397,7 +397,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data, return -EPERM; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; err = drm_addmap_core(dev, map->offset, map->size, map->type, @@ -443,7 +443,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data, int i; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; idx = map->offset; @@ -545,7 +545,7 @@ EXPORT_SYMBOL(drm_legacy_rmmap_locked); void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map) { if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; mutex_lock(&dev->struct_mutex); @@ -558,7 +558,7 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master) { struct drm_map_list *r_list, *list_temp; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return; mutex_lock(&dev->struct_mutex); @@ -595,7 +595,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, int ret; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -1220,7 +1220,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data, struct drm_buf_desc *request = data; int ret; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1266,7 +1266,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data, int i; int count; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1347,7 +1347,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data, int order; struct drm_buf_entry *entry; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1395,7 +1395,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data, int idx; struct drm_buf *buf; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1450,7 +1450,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data, struct drm_buf_map *request = data; int i; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1530,7 +1530,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data, int drm_legacy_dma_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (dev->driver->dma_ioctl) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c new file 
mode 100644 index 000000000000..26bb78c76481 --- /dev/null +++ b/drivers/gpu/drm/drm_connector.c @@ -0,0 +1,1123 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <drm/drmP.h> +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> + +#include "drm_crtc_internal.h" +#include "drm_internal.h" + +/** + * DOC: overview + * + * In DRM connectors are the general abstraction for display sinks, and include + * als fixed panels or anything else that can display pixels in some form. As + * opposed to all other KMS objects representing hardware (like CRTC, encoder or + * plane abstractions) connectors can be hotplugged and unplugged at runtime. + * Hence they are reference-counted using drm_connector_reference() and + * drm_connector_unreference(). + * + * KMS driver must create, initialize, register and attach at a struct + * &drm_connector for each such sink. The instance is created as other KMS + * objects and initialized by setting the following fields. + * + * The connector is then registered with a call to drm_connector_init() with a + * pointer to the connector functions and a connector type, and exposed through + * sysfs with a call to drm_connector_register(). + * + * Connectors must be attached to an encoder to be used. For devices that map + * connectors to encoders 1:1, the connector should be attached at + * initialization time with a call to drm_mode_connector_attach_encoder(). The + * driver must also set the struct &drm_connector encoder field to point to the + * attached encoder. + * + * For connectors which are not fixed (like built-in panels) the driver needs to + * support hotplug notifications. The simplest way to do that is by using the + * probe helpers, see drm_kms_helper_poll_init() for connectors which don't have + * hardware support for hotplug interrupts. Connectors with hardware hotplug + * support can instead use e.g. drm_helper_hpd_irq_event(). + */ + +struct drm_conn_prop_enum_list { + int type; + const char *name; + struct ida ida; +}; + +/* + * Connector and encoder types. 
+ */ +static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { + { DRM_MODE_CONNECTOR_Unknown, "Unknown" }, + { DRM_MODE_CONNECTOR_VGA, "VGA" }, + { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, + { DRM_MODE_CONNECTOR_DVID, "DVI-D" }, + { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, + { DRM_MODE_CONNECTOR_Composite, "Composite" }, + { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" }, + { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, + { DRM_MODE_CONNECTOR_Component, "Component" }, + { DRM_MODE_CONNECTOR_9PinDIN, "DIN" }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, + { DRM_MODE_CONNECTOR_TV, "TV" }, + { DRM_MODE_CONNECTOR_eDP, "eDP" }, + { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, + { DRM_MODE_CONNECTOR_DSI, "DSI" }, + { DRM_MODE_CONNECTOR_DPI, "DPI" }, +}; + +void drm_connector_ida_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_init(&drm_connector_enum_list[i].ida); +} + +void drm_connector_ida_destroy(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) + ida_destroy(&drm_connector_enum_list[i].ida); +} + +/** + * drm_connector_get_cmdline_mode - reads the user's cmdline mode + * @connector: connector to quwery + * + * The kernel supports per-connector configuration of its consoles through + * use of the video= parameter. This function parses that option and + * extracts the user's specified mode (or enable/disable status) for a + * particular connector. This is typically only used during the early fbdev + * setup. + */ +static void drm_connector_get_cmdline_mode(struct drm_connector *connector) +{ + struct drm_cmdline_mode *mode = &connector->cmdline_mode; + char *option = NULL; + + if (fb_get_options(connector->name, &option)) + return; + + if (!drm_mode_parse_command_line_for_connector(option, + connector, + mode)) + return; + + if (mode->force) { + const char *s; + + switch (mode->force) { + case DRM_FORCE_OFF: + s = "OFF"; + break; + case DRM_FORCE_ON_DIGITAL: + s = "ON - dig"; + break; + default: + case DRM_FORCE_ON: + s = "ON"; + break; + } + + DRM_INFO("forcing %s connector %s\n", connector->name, s); + connector->force = mode->force; + } + + DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", + connector->name, + mode->xres, mode->yres, + mode->refresh_specified ? mode->refresh : 60, + mode->rb ? " reduced blanking" : "", + mode->margins ? " with margins" : "", + mode->interlace ? " interlaced" : ""); +} + +static void drm_connector_free(struct kref *kref) +{ + struct drm_connector *connector = + container_of(kref, struct drm_connector, base.refcount); + struct drm_device *dev = connector->dev; + + drm_mode_object_unregister(dev, &connector->base); + connector->funcs->destroy(connector); +} + +/** + * drm_connector_init - Init a preallocated connector + * @dev: DRM device + * @connector: the connector to init + * @funcs: callbacks for this connector + * @connector_type: user visible type of the connector + * + * Initialises a preallocated connector. Connectors should be + * subclassed as part of driver connector objects. + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_connector_init(struct drm_device *dev, + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type) +{ + struct drm_mode_config *config = &dev->mode_config; + int ret; + struct ida *connector_ida = + &drm_connector_enum_list[connector_type].ida; + + drm_modeset_lock_all(dev); + + ret = drm_mode_object_get_reg(dev, &connector->base, + DRM_MODE_OBJECT_CONNECTOR, + false, drm_connector_free); + if (ret) + goto out_unlock; + + connector->base.properties = &connector->properties; + connector->dev = dev; + connector->funcs = funcs; + + ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto out_put; + connector->index = ret; + ret = 0; + + connector->connector_type = connector_type; + connector->connector_type_id = + ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); + if (connector->connector_type_id < 0) { + ret = connector->connector_type_id; + goto out_put_id; + } + connector->name = + kasprintf(GFP_KERNEL, "%s-%d", + drm_connector_enum_list[connector_type].name, + connector->connector_type_id); + if (!connector->name) { + ret = -ENOMEM; + goto out_put_type_id; + } + + INIT_LIST_HEAD(&connector->probed_modes); + INIT_LIST_HEAD(&connector->modes); + connector->edid_blob_ptr = NULL; + connector->status = connector_status_unknown; + + drm_connector_get_cmdline_mode(connector); + + /* We should add connectors at the end to avoid upsetting the connector + * index too much. */ + list_add_tail(&connector->head, &config->connector_list); + config->num_connector++; + + if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) + drm_object_attach_property(&connector->base, + config->edid_property, + 0); + + drm_object_attach_property(&connector->base, + config->dpms_property, 0); + + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); + } + + connector->debugfs_entry = NULL; +out_put_type_id: + if (ret) + ida_remove(connector_ida, connector->connector_type_id); +out_put_id: + if (ret) + ida_remove(&config->connector_ida, connector->index); +out_put: + if (ret) + drm_mode_object_unregister(dev, &connector->base); + +out_unlock: + drm_modeset_unlock_all(dev); + + return ret; +} +EXPORT_SYMBOL(drm_connector_init); + +/** + * drm_mode_connector_attach_encoder - attach a connector to an encoder + * @connector: connector to attach + * @encoder: encoder to attach @connector to + * + * This function links up a connector to an encoder. Note that the routing + * restrictions between encoders and crtcs are exposed to userspace through the + * possible_clones and possible_crtcs bitmasks. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_connector_attach_encoder(struct drm_connector *connector, + struct drm_encoder *encoder) +{ + int i; + + /* + * In the past, drivers have attempted to model the static association + * of connector to encoder in simple connector/encoder devices using a + * direct assignment of connector->encoder = encoder. This connection + * is a logical one and the responsibility of the core, so drivers are + * expected not to mess with this. + * + * Note that the error return should've been enough here, but a large + * majority of drivers ignores the return value, so add in a big WARN + * to get people's attention. 
+ */ + if (WARN_ON(connector->encoder)) + return -EINVAL; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) { + connector->encoder_ids[i] = encoder->base.id; + return 0; + } + } + return -ENOMEM; +} +EXPORT_SYMBOL(drm_mode_connector_attach_encoder); + +static void drm_mode_remove(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + list_del(&mode->head); + drm_mode_destroy(connector->dev, mode); +} + +/** + * drm_connector_cleanup - cleans up an initialised connector + * @connector: connector to cleanup + * + * Cleans up the connector but doesn't free the object. + */ +void drm_connector_cleanup(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *mode, *t; + + /* The connector should have been removed from userspace long before + * it is finally destroyed. + */ + if (WARN_ON(connector->registered)) + drm_connector_unregister(connector); + + if (connector->tile_group) { + drm_mode_put_tile_group(dev, connector->tile_group); + connector->tile_group = NULL; + } + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) + drm_mode_remove(connector, mode); + + list_for_each_entry_safe(mode, t, &connector->modes, head) + drm_mode_remove(connector, mode); + + ida_remove(&drm_connector_enum_list[connector->connector_type].ida, + connector->connector_type_id); + + ida_remove(&dev->mode_config.connector_ida, + connector->index); + + kfree(connector->display_info.bus_formats); + drm_mode_object_unregister(dev, &connector->base); + kfree(connector->name); + connector->name = NULL; + list_del(&connector->head); + dev->mode_config.num_connector--; + + WARN_ON(connector->state && !connector->funcs->atomic_destroy_state); + if (connector->state && connector->funcs->atomic_destroy_state) + connector->funcs->atomic_destroy_state(connector, + connector->state); + + memset(connector, 0, sizeof(*connector)); +} +EXPORT_SYMBOL(drm_connector_cleanup); + +/** + * drm_connector_register - register a connector + * @connector: the connector to register + * + * Register userspace interfaces for a connector + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_connector_register(struct drm_connector *connector) +{ + int ret; + + if (connector->registered) + return 0; + + ret = drm_sysfs_connector_add(connector); + if (ret) + return ret; + + ret = drm_debugfs_connector_add(connector); + if (ret) { + goto err_sysfs; + } + + if (connector->funcs->late_register) { + ret = connector->funcs->late_register(connector); + if (ret) + goto err_debugfs; + } + + drm_mode_object_register(connector->dev, &connector->base); + + connector->registered = true; + return 0; + +err_debugfs: + drm_debugfs_connector_remove(connector); +err_sysfs: + drm_sysfs_connector_remove(connector); + return ret; +} +EXPORT_SYMBOL(drm_connector_register); + +/** + * drm_connector_unregister - unregister a connector + * @connector: the connector to unregister + * + * Unregister userspace interfaces for a connector + */ +void drm_connector_unregister(struct drm_connector *connector) +{ + if (!connector->registered) + return; + + if (connector->funcs->early_unregister) + connector->funcs->early_unregister(connector); + + drm_sysfs_connector_remove(connector); + drm_debugfs_connector_remove(connector); + + connector->registered = false; +} +EXPORT_SYMBOL(drm_connector_unregister); + +void drm_connector_unregister_all(struct drm_device *dev) +{ + struct drm_connector *connector; + + /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_connector_unregister(connector); +} + +int drm_connector_register_all(struct drm_device *dev) +{ + struct drm_connector *connector; + int ret; + + /* FIXME: taking the mode config mutex ends up in a clash with + * fbcon/backlight registration */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + ret = drm_connector_register(connector); + if (ret) + goto err; + } + + return 0; + +err: + mutex_unlock(&dev->mode_config.mutex); + drm_connector_unregister_all(dev); + return ret; +} + +/** + * drm_get_connector_status_name - return a string for connector status + * @status: connector status to compute name of + * + * In contrast to the other drm_get_*_name functions this one here returns a + * const pointer and hence is threadsafe. + */ +const char *drm_get_connector_status_name(enum drm_connector_status status) +{ + if (status == connector_status_connected) + return "connected"; + else if (status == connector_status_disconnected) + return "disconnected"; + else + return "unknown"; +} +EXPORT_SYMBOL(drm_get_connector_status_name); + +static const struct drm_prop_enum_list drm_subpixel_enum_list[] = { + { SubPixelUnknown, "Unknown" }, + { SubPixelHorizontalRGB, "Horizontal RGB" }, + { SubPixelHorizontalBGR, "Horizontal BGR" }, + { SubPixelVerticalRGB, "Vertical RGB" }, + { SubPixelVerticalBGR, "Vertical BGR" }, + { SubPixelNone, "None" }, +}; + +/** + * drm_get_subpixel_order_name - return a string for a given subpixel enum + * @order: enum of subpixel_order + * + * Note you could abuse this and return something out of bounds, but that + * would be a caller error. No unscrubbed user data should make it here. 
+ */ +const char *drm_get_subpixel_order_name(enum subpixel_order order) +{ + return drm_subpixel_enum_list[order].name; +} +EXPORT_SYMBOL(drm_get_subpixel_order_name); + +static const struct drm_prop_enum_list drm_dpms_enum_list[] = { + { DRM_MODE_DPMS_ON, "On" }, + { DRM_MODE_DPMS_STANDBY, "Standby" }, + { DRM_MODE_DPMS_SUSPEND, "Suspend" }, + { DRM_MODE_DPMS_OFF, "Off" } +}; +DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) + +/** + * drm_display_info_set_bus_formats - set the supported bus formats + * @info: display info to store bus formats in + * @formats: array containing the supported bus formats + * @num_formats: the number of entries in the fmts array + * + * Store the supported bus formats in display info structure. + * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for + * a full list of available formats. + */ +int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats) +{ + u32 *fmts = NULL; + + if (!formats && num_formats) + return -EINVAL; + + if (formats && num_formats) { + fmts = kmemdup(formats, sizeof(*formats) * num_formats, + GFP_KERNEL); + if (!fmts) + return -ENOMEM; + } + + kfree(info->bus_formats); + info->bus_formats = fmts; + info->num_bus_formats = num_formats; + + return 0; +} +EXPORT_SYMBOL(drm_display_info_set_bus_formats); + +/* Optional connector properties. */ +static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { + { DRM_MODE_SCALE_NONE, "None" }, + { DRM_MODE_SCALE_FULLSCREEN, "Full" }, + { DRM_MODE_SCALE_CENTER, "Center" }, + { DRM_MODE_SCALE_ASPECT, "Full aspect" }, +}; + +static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = { + { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" }, + { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" }, + { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" }, +}; + +static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ + { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ +}; +DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) + +static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ + { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ +}; +DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, + drm_dvi_i_subconnector_enum_list) + +static const struct drm_prop_enum_list drm_tv_select_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ +}; +DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) + +static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { + { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ + { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ + { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ +}; +DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, + drm_tv_subconnector_enum_list) + +int drm_connector_create_standard_properties(struct 
drm_device *dev) +{ + struct drm_property *prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "EDID", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.edid_property = prop; + + prop = drm_property_create_enum(dev, 0, + "DPMS", drm_dpms_enum_list, + ARRAY_SIZE(drm_dpms_enum_list)); + if (!prop) + return -ENOMEM; + dev->mode_config.dpms_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "PATH", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.path_property = prop; + + prop = drm_property_create(dev, + DRM_MODE_PROP_BLOB | + DRM_MODE_PROP_IMMUTABLE, + "TILE", 0); + if (!prop) + return -ENOMEM; + dev->mode_config.tile_property = prop; + + return 0; +} + +/** + * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties + * @dev: DRM device + * + * Called by a driver the first time a DVI-I connector is made. + */ +int drm_mode_create_dvi_i_properties(struct drm_device *dev) +{ + struct drm_property *dvi_i_selector; + struct drm_property *dvi_i_subconnector; + + if (dev->mode_config.dvi_i_select_subconnector_property) + return 0; + + dvi_i_selector = + drm_property_create_enum(dev, 0, + "select subconnector", + drm_dvi_i_select_enum_list, + ARRAY_SIZE(drm_dvi_i_select_enum_list)); + dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; + + dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "subconnector", + drm_dvi_i_subconnector_enum_list, + ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); + dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); + +/** + * drm_create_tv_properties - create TV specific connector properties + * @dev: DRM device + * @num_modes: number of different TV formats (modes) supported + * @modes: array of pointers to strings containing name of each format + * + * Called by a driver's TV initialization routine, this function creates + * the TV specific connector properties for a given device. Caller is + * responsible for allocating a list of format names and passing them to + * this routine. + */ +int drm_mode_create_tv_properties(struct drm_device *dev, + unsigned int num_modes, + const char * const modes[]) +{ + struct drm_property *tv_selector; + struct drm_property *tv_subconnector; + unsigned int i; + + if (dev->mode_config.tv_select_subconnector_property) + return 0; + + /* + * Basic connector properties + */ + tv_selector = drm_property_create_enum(dev, 0, + "select subconnector", + drm_tv_select_enum_list, + ARRAY_SIZE(drm_tv_select_enum_list)); + if (!tv_selector) + goto nomem; + + dev->mode_config.tv_select_subconnector_property = tv_selector; + + tv_subconnector = + drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, + "subconnector", + drm_tv_subconnector_enum_list, + ARRAY_SIZE(drm_tv_subconnector_enum_list)); + if (!tv_subconnector) + goto nomem; + dev->mode_config.tv_subconnector_property = tv_subconnector; + + /* + * Other, TV specific properties: margins & TV modes. 
+ */ + dev->mode_config.tv_left_margin_property = + drm_property_create_range(dev, 0, "left margin", 0, 100); + if (!dev->mode_config.tv_left_margin_property) + goto nomem; + + dev->mode_config.tv_right_margin_property = + drm_property_create_range(dev, 0, "right margin", 0, 100); + if (!dev->mode_config.tv_right_margin_property) + goto nomem; + + dev->mode_config.tv_top_margin_property = + drm_property_create_range(dev, 0, "top margin", 0, 100); + if (!dev->mode_config.tv_top_margin_property) + goto nomem; + + dev->mode_config.tv_bottom_margin_property = + drm_property_create_range(dev, 0, "bottom margin", 0, 100); + if (!dev->mode_config.tv_bottom_margin_property) + goto nomem; + + dev->mode_config.tv_mode_property = + drm_property_create(dev, DRM_MODE_PROP_ENUM, + "mode", num_modes); + if (!dev->mode_config.tv_mode_property) + goto nomem; + + for (i = 0; i < num_modes; i++) + drm_property_add_enum(dev->mode_config.tv_mode_property, i, + i, modes[i]); + + dev->mode_config.tv_brightness_property = + drm_property_create_range(dev, 0, "brightness", 0, 100); + if (!dev->mode_config.tv_brightness_property) + goto nomem; + + dev->mode_config.tv_contrast_property = + drm_property_create_range(dev, 0, "contrast", 0, 100); + if (!dev->mode_config.tv_contrast_property) + goto nomem; + + dev->mode_config.tv_flicker_reduction_property = + drm_property_create_range(dev, 0, "flicker reduction", 0, 100); + if (!dev->mode_config.tv_flicker_reduction_property) + goto nomem; + + dev->mode_config.tv_overscan_property = + drm_property_create_range(dev, 0, "overscan", 0, 100); + if (!dev->mode_config.tv_overscan_property) + goto nomem; + + dev->mode_config.tv_saturation_property = + drm_property_create_range(dev, 0, "saturation", 0, 100); + if (!dev->mode_config.tv_saturation_property) + goto nomem; + + dev->mode_config.tv_hue_property = + drm_property_create_range(dev, 0, "hue", 0, 100); + if (!dev->mode_config.tv_hue_property) + goto nomem; + + return 0; +nomem: + return -ENOMEM; +} +EXPORT_SYMBOL(drm_mode_create_tv_properties); + +/** + * drm_mode_create_scaling_mode_property - create scaling mode property + * @dev: DRM device + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + */ +int drm_mode_create_scaling_mode_property(struct drm_device *dev) +{ + struct drm_property *scaling_mode; + + if (dev->mode_config.scaling_mode_property) + return 0; + + scaling_mode = + drm_property_create_enum(dev, 0, "scaling mode", + drm_scaling_mode_enum_list, + ARRAY_SIZE(drm_scaling_mode_enum_list)); + + dev->mode_config.scaling_mode_property = scaling_mode; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); + +/** + * drm_mode_create_aspect_ratio_property - create aspect ratio property + * @dev: DRM device + * + * Called by a driver the first time it's needed, must be attached to desired + * connectors. + * + * Returns: + * Zero on success, negative errno on failure. 
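As the kerneldoc above notes, these properties are created once per device and then attached to the connectors that want them. A minimal driver-side sketch of that "create once, attach per connector" pattern, using only helpers from this file (the foo_attach_scaling_props() name is hypothetical and the initial values are arbitrary examples):

static void foo_attach_scaling_props(struct drm_device *dev,
				     struct drm_connector *connector)
{
	/* Create the device-wide properties; both helpers return early
	 * once the property already exists. */
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_aspect_ratio_property(dev);

	/* Attach them to this connector with an initial value. */
	drm_object_attach_property(&connector->base,
				   dev->mode_config.scaling_mode_property,
				   DRM_MODE_SCALE_ASPECT);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.aspect_ratio_property,
				   DRM_MODE_PICTURE_ASPECT_NONE);
}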
+ */ +int drm_mode_create_aspect_ratio_property(struct drm_device *dev) +{ + if (dev->mode_config.aspect_ratio_property) + return 0; + + dev->mode_config.aspect_ratio_property = + drm_property_create_enum(dev, 0, "aspect ratio", + drm_aspect_ratio_enum_list, + ARRAY_SIZE(drm_aspect_ratio_enum_list)); + + if (dev->mode_config.aspect_ratio_property == NULL) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); + +/** + * drm_mode_create_suggested_offset_properties - create suggested offset properties + * @dev: DRM device + * + * Create the suggested x/y offset property for connectors. + */ +int drm_mode_create_suggested_offset_properties(struct drm_device *dev) +{ + if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property) + return 0; + + dev->mode_config.suggested_x_property = + drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff); + + dev->mode_config.suggested_y_property = + drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff); + + if (dev->mode_config.suggested_x_property == NULL || + dev->mode_config.suggested_y_property == NULL) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties); + +/** + * drm_mode_connector_set_path_property - set path property on connector + * @connector: connector to set property on. + * @path: path to use for property; must not be NULL. + * + * This creates a property to expose to userspace to specify a + * connector path. This is mainly used for DisplayPort MST where + * connectors have a topology and we want to allow userspace to give + * them more meaningful names. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_connector_set_path_property(struct drm_connector *connector, + const char *path) +{ + struct drm_device *dev = connector->dev; + int ret; + + ret = drm_property_replace_global_blob(dev, + &connector->path_blob_ptr, + strlen(path) + 1, + path, + &connector->base, + dev->mode_config.path_property); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_set_path_property); + +/** + * drm_mode_connector_set_tile_property - set tile property on connector + * @connector: connector to set property on. + * + * This looks up the tile information for a connector, and creates a + * property for userspace to parse if it exists. The property is of + * the form of 8 integers using ':' as a separator. + * + * Returns: + * Zero on success, errno on failure.
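For illustration, the blob set here is just a NUL-terminated string whose eight ':'-separated fields follow the snprintf() in the function body: tile group id, single-monitor flag, number of horizontal and vertical tiles, this tile's h/v location, and this tile's h/v size. The left half of a hypothetical 2x1 tiled 4K panel might therefore expose a value such as:

	"7:0:2:1:0:0:1920:2160"

(group 7, not a single monitor, 2x1 tiles, tile location 0,0, tile size 1920x2160; the concrete numbers are made up.)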
+ */ +int drm_mode_connector_set_tile_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + char tile[256]; + int ret; + + if (!connector->has_tile) { + ret = drm_property_replace_global_blob(dev, + &connector->tile_blob_ptr, + 0, + NULL, + &connector->base, + dev->mode_config.tile_property); + return ret; + } + + snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d", + connector->tile_group->id, connector->tile_is_single_monitor, + connector->num_h_tile, connector->num_v_tile, + connector->tile_h_loc, connector->tile_v_loc, + connector->tile_h_size, connector->tile_v_size); + + ret = drm_property_replace_global_blob(dev, + &connector->tile_blob_ptr, + strlen(tile) + 1, + tile, + &connector->base, + dev->mode_config.tile_property); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_set_tile_property); + +/** + * drm_mode_connector_update_edid_property - update the edid property of a connector + * @connector: drm connector + * @edid: new value of the edid property + * + * This function creates a new blob modeset object and assigns its id to the + * connector's edid property. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_connector_update_edid_property(struct drm_connector *connector, + const struct edid *edid) +{ + struct drm_device *dev = connector->dev; + size_t size = 0; + int ret; + + /* ignore requests to set edid when overridden */ + if (connector->override_edid) + return 0; + + if (edid) + size = EDID_LENGTH * (1 + edid->extensions); + + ret = drm_property_replace_global_blob(dev, + &connector->edid_blob_ptr, + size, + edid, + &connector->base, + dev->mode_config.edid_property); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_update_edid_property); + +int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) +{ + int ret = -EINVAL; + struct drm_connector *connector = obj_to_connector(obj); + + /* Do DPMS ourselves */ + if (property == connector->dev->mode_config.dpms_property) { + ret = (*connector->funcs->dpms)(connector, (int)value); + } else if (connector->funcs->set_property) + ret = connector->funcs->set_property(connector, property, value); + + /* store the property value if successful */ + if (!ret) + drm_object_property_set_value(&connector->base, property, value); + return ret; +} + +int drm_mode_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_connector_set_property *conn_set_prop = data; + struct drm_mode_obj_set_property obj_set_prop = { + .value = conn_set_prop->value, + .prop_id = conn_set_prop->prop_id, + .obj_id = conn_set_prop->connector_id, + .obj_type = DRM_MODE_OBJECT_CONNECTOR + }; + + /* It does all the locking and checking we need */ + return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv); +} + +static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector) +{ + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. */ + if (connector->state) + return connector->state->best_encoder; + return connector->encoder; +} + +static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, + const struct drm_file *file_priv) +{ + /* + * If user-space hasn't configured the driver to expose the stereo 3D + * modes, don't expose them. 
+ */ + if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) + return false; + + return true; +} + +int drm_mode_getconnector(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_get_connector *out_resp = data; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_display_mode *mode; + int mode_count = 0; + int encoders_count = 0; + int ret = 0; + int copied = 0; + int i; + struct drm_mode_modeinfo u_mode; + struct drm_mode_modeinfo __user *mode_ptr; + uint32_t __user *encoder_ptr; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); + + mutex_lock(&dev->mode_config.mutex); + + connector = drm_connector_lookup(dev, out_resp->connector_id); + if (!connector) { + ret = -ENOENT; + goto out_unlock; + } + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) + if (connector->encoder_ids[i] != 0) + encoders_count++; + + if (out_resp->count_modes == 0) { + connector->funcs->fill_modes(connector, + dev->mode_config.max_width, + dev->mode_config.max_height); + } + + /* delayed so we get modes regardless of pre-fill_modes state */ + list_for_each_entry(mode, &connector->modes, head) + if (drm_mode_expose_to_userspace(mode, file_priv)) + mode_count++; + + out_resp->connector_id = connector->base.id; + out_resp->connector_type = connector->connector_type; + out_resp->connector_type_id = connector->connector_type_id; + out_resp->mm_width = connector->display_info.width_mm; + out_resp->mm_height = connector->display_info.height_mm; + out_resp->subpixel = connector->display_info.subpixel_order; + out_resp->connection = connector->status; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + /* + * This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. 
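For reference, the user-space half of that two-pass pattern looks roughly like the sketch below. It assumes libdrm's drmIoctl() wrapper and a made-up get_modes() helper; real applications normally call drmModeGetConnector(), which hides all of this and also retries when the counts change between the two calls.

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>	/* drmIoctl(); pulls in drm.h and drm_mode.h */

static int get_modes(int fd, uint32_t connector_id,
		     struct drm_mode_modeinfo **modes_out, uint32_t *count_out)
{
	struct drm_mode_get_connector arg = { .connector_id = connector_id };
	struct drm_mode_modeinfo *modes;

	/* Pass 1: all array pointers are zero, so only the counts come back. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg))
		return -1;

	modes = calloc(arg.count_modes, sizeof(*modes));
	if (!modes && arg.count_modes)
		return -1;

	/* Pass 2: hand the kernel a buffer sized from the first pass. */
	arg.modes_ptr = (uintptr_t)modes;
	arg.count_props = 0;	/* not collecting properties or encoders here */
	arg.count_encoders = 0;
	if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg)) {
		free(modes);
		return -1;
	}

	*modes_out = modes;
	*count_out = arg.count_modes;
	return 0;
}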
+ */ + if ((out_resp->count_modes >= mode_count) && mode_count) { + copied = 0; + mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; + list_for_each_entry(mode, &connector->modes, head) { + if (!drm_mode_expose_to_userspace(mode, file_priv)) + continue; + + drm_mode_convert_to_umode(&u_mode, mode); + if (copy_to_user(mode_ptr + copied, + &u_mode, sizeof(u_mode))) { + ret = -EFAULT; + goto out; + } + copied++; + } + } + out_resp->count_modes = mode_count; + + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + if (ret) + goto out; + + if ((out_resp->count_encoders >= encoders_count) && encoders_count) { + copied = 0; + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] != 0) { + if (put_user(connector->encoder_ids[i], + encoder_ptr + copied)) { + ret = -EFAULT; + goto out; + } + copied++; + } + } + } + out_resp->count_encoders = encoders_count; + +out: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + drm_connector_unreference(connector); +out_unlock: + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 192a5f9eeb74..3c4000facb36 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -54,7 +54,7 @@ struct drm_ctx_list { void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; mutex_lock(&dev->struct_mutex); @@ -92,7 +92,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev) void drm_legacy_ctxbitmap_init(struct drm_device * dev) { if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; idr_init(&dev->ctx_idr); @@ -109,7 +109,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev) void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) { if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; mutex_lock(&dev->struct_mutex); @@ -131,7 +131,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) struct drm_ctx_list *pos, *tmp; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; mutex_lock(&dev->ctxlist_mutex); @@ -177,7 +177,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data, struct drm_map_list *_entry; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -225,7 +225,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data, struct drm_map_list *r_list = NULL; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -329,7 +329,7 @@ int drm_legacy_resctx(struct drm_device *dev, void 
*data, int i; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (res->count >= DRM_RESERVED_CONTEXTS) { @@ -363,7 +363,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; ctx->handle = drm_legacy_ctxbitmap_next(dev); @@ -410,7 +410,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; /* This is 0, because we don't handle any context flags */ @@ -436,7 +436,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; DRM_DEBUG("%d\n", ctx->handle); @@ -460,7 +460,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; DRM_DEBUG("%d\n", ctx->handle); @@ -486,7 +486,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) && - drm_core_check_feature(dev, DRIVER_MODESET)) + !drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; DRM_DEBUG("%d\n", ctx->handle); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f1d9f0569d7f..631691bae01d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -40,39 +40,14 @@ #include <drm/drm_modeset_lock.h> #include <drm/drm_atomic.h> #include <drm/drm_auth.h> +#include <drm/drm_framebuffer.h> #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer * -internal_framebuffer_create(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); - -/* Avoid boilerplate. I'm tired of typing. 
*/ -#define DRM_ENUM_NAME_FN(fnname, list) \ - const char *fnname(int val) \ - { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(list); i++) { \ - if (list[i].type == val) \ - return list[i].name; \ - } \ - return "(unknown)"; \ - } - /* * Global properties */ -static const struct drm_prop_enum_list drm_dpms_enum_list[] = { - { DRM_MODE_DPMS_ON, "On" }, - { DRM_MODE_DPMS_STANDBY, "Standby" }, - { DRM_MODE_DPMS_SUSPEND, "Suspend" }, - { DRM_MODE_DPMS_OFF, "Off" } -}; - -DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list) - static const struct drm_prop_enum_list drm_plane_type_enum_list[] = { { DRM_PLANE_TYPE_OVERLAY, "Overlay" }, { DRM_PLANE_TYPE_PRIMARY, "Primary" }, @@ -82,320 +57,6 @@ static const struct drm_prop_enum_list drm_plane_type_enum_list[] = { /* * Optional properties */ -static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = { - { DRM_MODE_SCALE_NONE, "None" }, - { DRM_MODE_SCALE_FULLSCREEN, "Full" }, - { DRM_MODE_SCALE_CENTER, "Center" }, - { DRM_MODE_SCALE_ASPECT, "Full aspect" }, -}; - -static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = { - { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" }, - { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" }, - { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" }, -}; - -/* - * Non-global properties, but "required" for certain connectors. - */ -static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ - { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ - { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ -}; - -DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list) - -static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ - { DRM_MODE_SUBCONNECTOR_DVID, "DVI-D" }, /* DVI-I */ - { DRM_MODE_SUBCONNECTOR_DVIA, "DVI-A" }, /* DVI-I */ -}; - -DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name, - drm_dvi_i_subconnector_enum_list) - -static const struct drm_prop_enum_list drm_tv_select_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */ - { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ -}; - -DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) - -static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = { - { DRM_MODE_SUBCONNECTOR_Unknown, "Unknown" }, /* DVI-I and TV-out */ - { DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ - { DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ -}; - -DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, - drm_tv_subconnector_enum_list) - -static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = { - { DRM_MODE_DIRTY_OFF, "Off" }, - { DRM_MODE_DIRTY_ON, "On" }, - { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, -}; - -struct drm_conn_prop_enum_list { - int type; - const char *name; - struct ida ida; -}; - -/* - * Connector and encoder types. 
- */ -static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { - { DRM_MODE_CONNECTOR_Unknown, "Unknown" }, - { DRM_MODE_CONNECTOR_VGA, "VGA" }, - { DRM_MODE_CONNECTOR_DVII, "DVI-I" }, - { DRM_MODE_CONNECTOR_DVID, "DVI-D" }, - { DRM_MODE_CONNECTOR_DVIA, "DVI-A" }, - { DRM_MODE_CONNECTOR_Composite, "Composite" }, - { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" }, - { DRM_MODE_CONNECTOR_LVDS, "LVDS" }, - { DRM_MODE_CONNECTOR_Component, "Component" }, - { DRM_MODE_CONNECTOR_9PinDIN, "DIN" }, - { DRM_MODE_CONNECTOR_DisplayPort, "DP" }, - { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" }, - { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" }, - { DRM_MODE_CONNECTOR_TV, "TV" }, - { DRM_MODE_CONNECTOR_eDP, "eDP" }, - { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, - { DRM_MODE_CONNECTOR_DSI, "DSI" }, - { DRM_MODE_CONNECTOR_DPI, "DPI" }, -}; - -static const struct drm_prop_enum_list drm_encoder_enum_list[] = { - { DRM_MODE_ENCODER_NONE, "None" }, - { DRM_MODE_ENCODER_DAC, "DAC" }, - { DRM_MODE_ENCODER_TMDS, "TMDS" }, - { DRM_MODE_ENCODER_LVDS, "LVDS" }, - { DRM_MODE_ENCODER_TVDAC, "TV" }, - { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, - { DRM_MODE_ENCODER_DSI, "DSI" }, - { DRM_MODE_ENCODER_DPMST, "DP MST" }, - { DRM_MODE_ENCODER_DPI, "DPI" }, -}; - -static const struct drm_prop_enum_list drm_subpixel_enum_list[] = { - { SubPixelUnknown, "Unknown" }, - { SubPixelHorizontalRGB, "Horizontal RGB" }, - { SubPixelHorizontalBGR, "Horizontal BGR" }, - { SubPixelVerticalRGB, "Vertical RGB" }, - { SubPixelVerticalBGR, "Vertical BGR" }, - { SubPixelNone, "None" }, -}; - -void drm_connector_ida_init(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) - ida_init(&drm_connector_enum_list[i].ida); -} - -void drm_connector_ida_destroy(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++) - ida_destroy(&drm_connector_enum_list[i].ida); -} - -/** - * drm_get_connector_status_name - return a string for connector status - * @status: connector status to compute name of - * - * In contrast to the other drm_get_*_name functions this one here returns a - * const pointer and hence is threadsafe. - */ -const char *drm_get_connector_status_name(enum drm_connector_status status) -{ - if (status == connector_status_connected) - return "connected"; - else if (status == connector_status_disconnected) - return "disconnected"; - else - return "unknown"; -} -EXPORT_SYMBOL(drm_get_connector_status_name); - -/** - * drm_get_subpixel_order_name - return a string for a given subpixel enum - * @order: enum of subpixel_order - * - * Note you could abuse this and return something out of bounds, but that - * would be a caller error. No unscrubbed user data should make it here. - */ -const char *drm_get_subpixel_order_name(enum subpixel_order order) -{ - return drm_subpixel_enum_list[order].name; -} -EXPORT_SYMBOL(drm_get_subpixel_order_name); - -/* - * Internal function to assign a slot in the object idr and optionally - * register the object into the idr. - */ -static int drm_mode_object_get_reg(struct drm_device *dev, - struct drm_mode_object *obj, - uint32_t obj_type, - bool register_obj, - void (*obj_free_cb)(struct kref *kref)) -{ - int ret; - - mutex_lock(&dev->mode_config.idr_mutex); - ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL); - if (ret >= 0) { - /* - * Set up the object linking under the protection of the idr - * lock so that other users can't see inconsistent state. 
- */ - obj->id = ret; - obj->type = obj_type; - if (obj_free_cb) { - obj->free_cb = obj_free_cb; - kref_init(&obj->refcount); - } - } - mutex_unlock(&dev->mode_config.idr_mutex); - - return ret < 0 ? ret : 0; -} - -/** - * drm_mode_object_get - allocate a new modeset identifier - * @dev: DRM device - * @obj: object pointer, used to generate unique ID - * @obj_type: object type - * - * Create a unique identifier based on @ptr in @dev's identifier space. Used - * for tracking modes, CRTCs and connectors. Note that despite the _get postfix - * modeset identifiers are _not_ reference counted. Hence don't use this for - * reference counted modeset objects like framebuffers. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_mode_object_get(struct drm_device *dev, - struct drm_mode_object *obj, uint32_t obj_type) -{ - return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL); -} - -static void drm_mode_object_register(struct drm_device *dev, - struct drm_mode_object *obj) -{ - mutex_lock(&dev->mode_config.idr_mutex); - idr_replace(&dev->mode_config.crtc_idr, obj, obj->id); - mutex_unlock(&dev->mode_config.idr_mutex); -} - -/** - * drm_mode_object_unregister - free a modeset identifer - * @dev: DRM device - * @object: object to free - * - * Free @id from @dev's unique identifier pool. - * This function can be called multiple times, and guards against - * multiple removals. - * These modeset identifiers are _not_ reference counted. Hence don't use this - * for reference counted modeset objects like framebuffers. - */ -void drm_mode_object_unregister(struct drm_device *dev, - struct drm_mode_object *object) -{ - mutex_lock(&dev->mode_config.idr_mutex); - if (object->id) { - idr_remove(&dev->mode_config.crtc_idr, object->id); - object->id = 0; - } - mutex_unlock(&dev->mode_config.idr_mutex); -} - -static struct drm_mode_object *_object_find(struct drm_device *dev, - uint32_t id, uint32_t type) -{ - struct drm_mode_object *obj = NULL; - - mutex_lock(&dev->mode_config.idr_mutex); - obj = idr_find(&dev->mode_config.crtc_idr, id); - if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) - obj = NULL; - if (obj && obj->id != id) - obj = NULL; - - if (obj && obj->free_cb) { - if (!kref_get_unless_zero(&obj->refcount)) - obj = NULL; - } - mutex_unlock(&dev->mode_config.idr_mutex); - - return obj; -} - -/** - * drm_mode_object_find - look up a drm object with static lifetime - * @dev: drm device - * @id: id of the mode object - * @type: type of the mode object - * - * This function is used to look up a modeset object. It will acquire a - * reference for reference counted objects. This reference must be dropped again - * by callind drm_mode_object_unreference(). - */ -struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, - uint32_t id, uint32_t type) -{ - struct drm_mode_object *obj = NULL; - - obj = _object_find(dev, id, type); - return obj; -} -EXPORT_SYMBOL(drm_mode_object_find); - -/** - * drm_mode_object_unreference - decr the object refcnt - * @obj: mode_object - * - * This functions decrements the object's refcount if it is a refcounted modeset - * object. It is a no-op on any other object. This is used to drop references - * acquired with drm_mode_object_reference(). 
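A short usage sketch of the lookup/reference pairing described above, written against the signatures in this file (foo_touch_object() and the DRM_MODE_OBJECT_ANY filter are arbitrary choices for the example):

static int foo_touch_object(struct drm_device *dev, uint32_t id)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ANY);
	if (!obj)
		return -ENOENT;

	/* ... use obj; for refcounted objects the lookup above took a
	 * reference on our behalf ... */

	drm_mode_object_unreference(obj);
	return 0;
}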
- */ -void drm_mode_object_unreference(struct drm_mode_object *obj) -{ - if (obj->free_cb) { - DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); - kref_put(&obj->refcount, obj->free_cb); - } -} -EXPORT_SYMBOL(drm_mode_object_unreference); - -/** - * drm_mode_object_reference - incr the object refcnt - * @obj: mode_object - * - * This functions increments the object's refcount if it is a refcounted modeset - * object. It is a no-op on any other object. References should be dropped again - * by calling drm_mode_object_unreference(). - */ -void drm_mode_object_reference(struct drm_mode_object *obj) -{ - if (obj->free_cb) { - DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); - kref_get(&obj->refcount); - } -} -EXPORT_SYMBOL(drm_mode_object_reference); - /** * drm_crtc_force_disable - Forcibly turn off a CRTC * @crtc: CRTC to turn off @@ -441,199 +102,6 @@ out: } EXPORT_SYMBOL(drm_crtc_force_disable_all); -static void drm_framebuffer_free(struct kref *kref) -{ - struct drm_framebuffer *fb = - container_of(kref, struct drm_framebuffer, base.refcount); - struct drm_device *dev = fb->dev; - - /* - * The lookup idr holds a weak reference, which has not necessarily been - * removed at this point. Check for that. - */ - drm_mode_object_unregister(dev, &fb->base); - - fb->funcs->destroy(fb); -} - -/** - * drm_framebuffer_init - initialize a framebuffer - * @dev: DRM device - * @fb: framebuffer to be initialized - * @funcs: ... with these functions - * - * Allocates an ID for the framebuffer's parent mode object, sets its mode - * functions & device file and adds it to the master fd list. - * - * IMPORTANT: - * This functions publishes the fb and makes it available for concurrent access - * by other users. Which means by this point the fb _must_ be fully set up - - * since all the fb attributes are invariant over its lifetime, no further - * locking but only correct reference counting is required. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, - const struct drm_framebuffer_funcs *funcs) -{ - int ret; - - INIT_LIST_HEAD(&fb->filp_head); - fb->dev = dev; - fb->funcs = funcs; - - ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB, - false, drm_framebuffer_free); - if (ret) - goto out; - - mutex_lock(&dev->mode_config.fb_lock); - dev->mode_config.num_fb++; - list_add(&fb->head, &dev->mode_config.fb_list); - mutex_unlock(&dev->mode_config.fb_lock); - - drm_mode_object_register(dev, &fb->base); -out: - return ret; -} -EXPORT_SYMBOL(drm_framebuffer_init); - -/** - * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference - * @dev: drm device - * @id: id of the fb object - * - * If successful, this grabs an additional reference to the framebuffer - - * callers need to make sure to eventually unreference the returned framebuffer - * again, using @drm_framebuffer_unreference. - */ -struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, - uint32_t id) -{ - struct drm_mode_object *obj; - struct drm_framebuffer *fb = NULL; - - obj = _object_find(dev, id, DRM_MODE_OBJECT_FB); - if (obj) - fb = obj_to_fb(obj); - return fb; -} -EXPORT_SYMBOL(drm_framebuffer_lookup); - -/** - * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr - * @fb: fb to unregister - * - * Drivers need to call this when cleaning up driver-private framebuffers, e.g. - * those used for fbdev. 
Note that the caller must hold a reference of it's own, - * i.e. the object may not be destroyed through this call (since it'll lead to a - * locking inversion). - */ -void drm_framebuffer_unregister_private(struct drm_framebuffer *fb) -{ - struct drm_device *dev; - - if (!fb) - return; - - dev = fb->dev; - - /* Mark fb as reaped and drop idr ref. */ - drm_mode_object_unregister(dev, &fb->base); -} -EXPORT_SYMBOL(drm_framebuffer_unregister_private); - -/** - * drm_framebuffer_cleanup - remove a framebuffer object - * @fb: framebuffer to remove - * - * Cleanup framebuffer. This function is intended to be used from the drivers - * ->destroy callback. It can also be used to clean up driver private - * framebuffers embedded into a larger structure. - * - * Note that this function does not remove the fb from active usuage - if it is - * still used anywhere, hilarity can ensue since userspace could call getfb on - * the id and get back -EINVAL. Obviously no concern at driver unload time. - * - * Also, the framebuffer will not be removed from the lookup idr - for - * user-created framebuffers this will happen in in the rmfb ioctl. For - * driver-private objects (e.g. for fbdev) drivers need to explicitly call - * drm_framebuffer_unregister_private. - */ -void drm_framebuffer_cleanup(struct drm_framebuffer *fb) -{ - struct drm_device *dev = fb->dev; - - mutex_lock(&dev->mode_config.fb_lock); - list_del(&fb->head); - dev->mode_config.num_fb--; - mutex_unlock(&dev->mode_config.fb_lock); -} -EXPORT_SYMBOL(drm_framebuffer_cleanup); - -/** - * drm_framebuffer_remove - remove and unreference a framebuffer object - * @fb: framebuffer to remove - * - * Scans all the CRTCs and planes in @dev's mode_config. If they're - * using @fb, removes it, setting it to NULL. Then drops the reference to the - * passed-in framebuffer. Might take the modeset locks. - * - * Note that this function optimizes the cleanup away if the caller holds the - * last reference to the framebuffer. It is also guaranteed to not take the - * modeset locks in this case. - */ -void drm_framebuffer_remove(struct drm_framebuffer *fb) -{ - struct drm_device *dev; - struct drm_crtc *crtc; - struct drm_plane *plane; - - if (!fb) - return; - - dev = fb->dev; - - WARN_ON(!list_empty(&fb->filp_head)); - - /* - * drm ABI mandates that we remove any deleted framebuffers from active - * useage. But since most sane clients only remove framebuffers they no - * longer need, try to optimize this away. - * - * Since we're holding a reference ourselves, observing a refcount of 1 - * means that we're the last holder and can skip it. Also, the refcount - * can never increase from 1 again, so we don't need any barriers or - * locks. - * - * Note that userspace could try to race with use and instate a new - * usage _after_ we've cleared all current ones. End result will be an - * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot - * in this manner. 
- */ - if (drm_framebuffer_read_refcount(fb) > 1) { - drm_modeset_lock_all(dev); - /* remove from any CRTC */ - drm_for_each_crtc(crtc, dev) { - if (crtc->primary->fb == fb) { - /* should turn off the crtc */ - if (drm_crtc_force_disable(crtc)) - DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); - } - } - - drm_for_each_plane(plane, dev) { - if (plane->fb == fb) - drm_plane_force_disable(plane); - } - drm_modeset_unlock_all(dev); - } - - drm_framebuffer_unreference(fb); -} -EXPORT_SYMBOL(drm_framebuffer_remove); - DEFINE_WW_CLASS(crtc_ww_class); static unsigned int drm_num_crtcs(struct drm_device *dev) @@ -783,473 +251,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_cleanup); -/* - * drm_mode_remove - remove and free a mode - * @connector: connector list to modify - * @mode: mode to remove - * - * Remove @mode from @connector's mode list, then free it. - */ -static void drm_mode_remove(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - list_del(&mode->head); - drm_mode_destroy(connector->dev, mode); -} - -/** - * drm_display_info_set_bus_formats - set the supported bus formats - * @info: display info to store bus formats in - * @formats: array containing the supported bus formats - * @num_formats: the number of entries in the fmts array - * - * Store the supported bus formats in display info structure. - * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for - * a full list of available formats. - */ -int drm_display_info_set_bus_formats(struct drm_display_info *info, - const u32 *formats, - unsigned int num_formats) -{ - u32 *fmts = NULL; - - if (!formats && num_formats) - return -EINVAL; - - if (formats && num_formats) { - fmts = kmemdup(formats, sizeof(*formats) * num_formats, - GFP_KERNEL); - if (!fmts) - return -ENOMEM; - } - - kfree(info->bus_formats); - info->bus_formats = fmts; - info->num_bus_formats = num_formats; - - return 0; -} -EXPORT_SYMBOL(drm_display_info_set_bus_formats); - -/** - * drm_connector_get_cmdline_mode - reads the user's cmdline mode - * @connector: connector to quwery - * - * The kernel supports per-connector configration of its consoles through - * use of the video= parameter. This function parses that option and - * extracts the user's specified mode (or enable/disable status) for a - * particular connector. This is typically only used during the early fbdev - * setup. - */ -static void drm_connector_get_cmdline_mode(struct drm_connector *connector) -{ - struct drm_cmdline_mode *mode = &connector->cmdline_mode; - char *option = NULL; - - if (fb_get_options(connector->name, &option)) - return; - - if (!drm_mode_parse_command_line_for_connector(option, - connector, - mode)) - return; - - if (mode->force) { - const char *s; - - switch (mode->force) { - case DRM_FORCE_OFF: - s = "OFF"; - break; - case DRM_FORCE_ON_DIGITAL: - s = "ON - dig"; - break; - default: - case DRM_FORCE_ON: - s = "ON"; - break; - } - - DRM_INFO("forcing %s connector %s\n", connector->name, s); - connector->force = mode->force; - } - - DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n", - connector->name, - mode->xres, mode->yres, - mode->refresh_specified ? mode->refresh : 60, - mode->rb ? " reduced blanking" : "", - mode->margins ? " with margins" : "", - mode->interlace ? 
" interlaced" : ""); -} - -static void drm_connector_free(struct kref *kref) -{ - struct drm_connector *connector = - container_of(kref, struct drm_connector, base.refcount); - struct drm_device *dev = connector->dev; - - drm_mode_object_unregister(dev, &connector->base); - connector->funcs->destroy(connector); -} - -/** - * drm_connector_init - Init a preallocated connector - * @dev: DRM device - * @connector: the connector to init - * @funcs: callbacks for this connector - * @connector_type: user visible type of the connector - * - * Initialises a preallocated connector. Connectors should be - * subclassed as part of driver connector objects. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_connector_init(struct drm_device *dev, - struct drm_connector *connector, - const struct drm_connector_funcs *funcs, - int connector_type) -{ - struct drm_mode_config *config = &dev->mode_config; - int ret; - struct ida *connector_ida = - &drm_connector_enum_list[connector_type].ida; - - drm_modeset_lock_all(dev); - - ret = drm_mode_object_get_reg(dev, &connector->base, - DRM_MODE_OBJECT_CONNECTOR, - false, drm_connector_free); - if (ret) - goto out_unlock; - - connector->base.properties = &connector->properties; - connector->dev = dev; - connector->funcs = funcs; - - ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL); - if (ret < 0) - goto out_put; - connector->index = ret; - ret = 0; - - connector->connector_type = connector_type; - connector->connector_type_id = - ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); - if (connector->connector_type_id < 0) { - ret = connector->connector_type_id; - goto out_put_id; - } - connector->name = - kasprintf(GFP_KERNEL, "%s-%d", - drm_connector_enum_list[connector_type].name, - connector->connector_type_id); - if (!connector->name) { - ret = -ENOMEM; - goto out_put_type_id; - } - - INIT_LIST_HEAD(&connector->probed_modes); - INIT_LIST_HEAD(&connector->modes); - connector->edid_blob_ptr = NULL; - connector->status = connector_status_unknown; - - drm_connector_get_cmdline_mode(connector); - - /* We should add connectors at the end to avoid upsetting the connector - * index too much. */ - list_add_tail(&connector->head, &config->connector_list); - config->num_connector++; - - if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) - drm_object_attach_property(&connector->base, - config->edid_property, - 0); - - drm_object_attach_property(&connector->base, - config->dpms_property, 0); - - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { - drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); - } - - connector->debugfs_entry = NULL; -out_put_type_id: - if (ret) - ida_remove(connector_ida, connector->connector_type_id); -out_put_id: - if (ret) - ida_remove(&config->connector_ida, connector->index); -out_put: - if (ret) - drm_mode_object_unregister(dev, &connector->base); - -out_unlock: - drm_modeset_unlock_all(dev); - - return ret; -} -EXPORT_SYMBOL(drm_connector_init); - -/** - * drm_connector_cleanup - cleans up an initialised connector - * @connector: connector to cleanup - * - * Cleans up the connector but doesn't free the object. - */ -void drm_connector_cleanup(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_display_mode *mode, *t; - - /* The connector should have been removed from userspace long before - * it is finally destroyed. 
- */ - if (WARN_ON(connector->registered)) - drm_connector_unregister(connector); - - if (connector->tile_group) { - drm_mode_put_tile_group(dev, connector->tile_group); - connector->tile_group = NULL; - } - - list_for_each_entry_safe(mode, t, &connector->probed_modes, head) - drm_mode_remove(connector, mode); - - list_for_each_entry_safe(mode, t, &connector->modes, head) - drm_mode_remove(connector, mode); - - ida_remove(&drm_connector_enum_list[connector->connector_type].ida, - connector->connector_type_id); - - ida_remove(&dev->mode_config.connector_ida, - connector->index); - - kfree(connector->display_info.bus_formats); - drm_mode_object_unregister(dev, &connector->base); - kfree(connector->name); - connector->name = NULL; - list_del(&connector->head); - dev->mode_config.num_connector--; - - WARN_ON(connector->state && !connector->funcs->atomic_destroy_state); - if (connector->state && connector->funcs->atomic_destroy_state) - connector->funcs->atomic_destroy_state(connector, - connector->state); - - memset(connector, 0, sizeof(*connector)); -} -EXPORT_SYMBOL(drm_connector_cleanup); - -/** - * drm_connector_register - register a connector - * @connector: the connector to register - * - * Register userspace interfaces for a connector - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_connector_register(struct drm_connector *connector) -{ - int ret; - - if (connector->registered) - return 0; - - ret = drm_sysfs_connector_add(connector); - if (ret) - return ret; - - ret = drm_debugfs_connector_add(connector); - if (ret) { - goto err_sysfs; - } - - if (connector->funcs->late_register) { - ret = connector->funcs->late_register(connector); - if (ret) - goto err_debugfs; - } - - drm_mode_object_register(connector->dev, &connector->base); - - connector->registered = true; - return 0; - -err_debugfs: - drm_debugfs_connector_remove(connector); -err_sysfs: - drm_sysfs_connector_remove(connector); - return ret; -} -EXPORT_SYMBOL(drm_connector_register); - -/** - * drm_connector_unregister - unregister a connector - * @connector: the connector to unregister - * - * Unregister userspace interfaces for a connector - */ -void drm_connector_unregister(struct drm_connector *connector) -{ - if (!connector->registered) - return; - - if (connector->funcs->early_unregister) - connector->funcs->early_unregister(connector); - - drm_sysfs_connector_remove(connector); - drm_debugfs_connector_remove(connector); - - connector->registered = false; -} -EXPORT_SYMBOL(drm_connector_unregister); - -static void drm_connector_unregister_all(struct drm_device *dev) -{ - struct drm_connector *connector; - - /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - drm_connector_unregister(connector); -} - -static int drm_connector_register_all(struct drm_device *dev) -{ - struct drm_connector *connector; - int ret; - - mutex_lock(&dev->mode_config.mutex); - - drm_for_each_connector(connector, dev) { - ret = drm_connector_register(connector); - if (ret) - goto err; - } - - mutex_unlock(&dev->mode_config.mutex); - - return 0; - -err: - mutex_unlock(&dev->mode_config.mutex); - drm_connector_unregister_all(dev); - return ret; -} - -static int drm_encoder_register_all(struct drm_device *dev) -{ - struct drm_encoder *encoder; - int ret = 0; - - drm_for_each_encoder(encoder, dev) { - if (encoder->funcs->late_register) - ret = encoder->funcs->late_register(encoder); - if (ret) - return ret; - } - - return 0; -} - 
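For context, the register/unregister helpers above are normally driven from a driver's output-setup path. A rough sketch of that path using the init functions from this file (the foo_* names and funcs tables are hypothetical driver-provided pieces, and HDMI/TMDS is just an example pairing):

#include <drm/drm_crtc.h>

struct foo_output {
	struct drm_encoder encoder;
	struct drm_connector connector;
};

static int foo_output_init(struct drm_device *dev, struct foo_output *out)
{
	int ret;

	/* foo_encoder_funcs/foo_connector_funcs are assumed to be the
	 * driver's usual vtables, defined elsewhere. */
	ret = drm_encoder_init(dev, &out->encoder, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;

	ret = drm_connector_init(dev, &out->connector, &foo_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret)
		return ret;

	drm_mode_connector_attach_encoder(&out->connector, &out->encoder);

	/*
	 * Drivers that call drm_dev_register() once the mode objects exist
	 * get this step via drm_connector_register_all() above and can
	 * skip the explicit call.
	 */
	return drm_connector_register(&out->connector);
}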
-static void drm_encoder_unregister_all(struct drm_device *dev) -{ - struct drm_encoder *encoder; - - drm_for_each_encoder(encoder, dev) { - if (encoder->funcs->early_unregister) - encoder->funcs->early_unregister(encoder); - } -} - -/** - * drm_encoder_init - Init a preallocated encoder - * @dev: drm device - * @encoder: the encoder to init - * @funcs: callbacks for this encoder - * @encoder_type: user visible type of the encoder - * @name: printf style format string for the encoder name, or NULL for default name - * - * Initialises a preallocated encoder. Encoder should be - * subclassed as part of driver encoder objects. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - const struct drm_encoder_funcs *funcs, - int encoder_type, const char *name, ...) -{ - int ret; - - drm_modeset_lock_all(dev); - - ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); - if (ret) - goto out_unlock; - - encoder->dev = dev; - encoder->encoder_type = encoder_type; - encoder->funcs = funcs; - if (name) { - va_list ap; - - va_start(ap, name); - encoder->name = kvasprintf(GFP_KERNEL, name, ap); - va_end(ap); - } else { - encoder->name = kasprintf(GFP_KERNEL, "%s-%d", - drm_encoder_enum_list[encoder_type].name, - encoder->base.id); - } - if (!encoder->name) { - ret = -ENOMEM; - goto out_put; - } - - list_add_tail(&encoder->head, &dev->mode_config.encoder_list); - encoder->index = dev->mode_config.num_encoder++; - -out_put: - if (ret) - drm_mode_object_unregister(dev, &encoder->base); - -out_unlock: - drm_modeset_unlock_all(dev); - - return ret; -} -EXPORT_SYMBOL(drm_encoder_init); - -/** - * drm_encoder_cleanup - cleans up an initialised encoder - * @encoder: encoder to cleanup - * - * Cleans up the encoder but doesn't free the object. - */ -void drm_encoder_cleanup(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - - /* Note that the encoder_list is considered to be static; should we - * remove the drm_encoder at runtime we would have to decrement all - * the indices on the drm_encoder after us in the encoder_list. 
- */ - - drm_modeset_lock_all(dev); - drm_mode_object_unregister(dev, &encoder->base); - kfree(encoder->name); - list_del(&encoder->head); - dev->mode_config.num_encoder--; - drm_modeset_unlock_all(dev); - - memset(encoder, 0, sizeof(*encoder)); -} -EXPORT_SYMBOL(drm_encoder_cleanup); - static unsigned int drm_num_planes(struct drm_device *dev) { unsigned int num = 0; @@ -1268,7 +269,7 @@ static unsigned int drm_num_planes(struct drm_device *dev) * @plane: plane object to init * @possible_crtcs: bitmask of possible CRTCs * @funcs: callbacks for the new plane - * @formats: array of supported formats (%DRM_FORMAT_*) + * @formats: array of supported formats (DRM_FORMAT\_\*) * @format_count: number of elements in @formats * @type: type of plane (overlay, primary, cursor) * @name: printf style format string for the plane name, or NULL for default name @@ -1383,7 +384,7 @@ static void drm_plane_unregister_all(struct drm_device *dev) * @plane: plane object to init * @possible_crtcs: bitmask of possible CRTCs * @funcs: callbacks for the new plane - * @formats: array of supported formats (%DRM_FORMAT_*) + * @formats: array of supported formats (DRM_FORMAT\_\*) * @format_count: number of elements in @formats * @is_primary: plane type (primary vs overlay) * @@ -1542,39 +543,11 @@ void drm_modeset_unregister_all(struct drm_device *dev) static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; + int ret; - /* - * Standard properties (apply to all connectors) - */ - prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | - DRM_MODE_PROP_IMMUTABLE, - "EDID", 0); - if (!prop) - return -ENOMEM; - dev->mode_config.edid_property = prop; - - prop = drm_property_create_enum(dev, 0, - "DPMS", drm_dpms_enum_list, - ARRAY_SIZE(drm_dpms_enum_list)); - if (!prop) - return -ENOMEM; - dev->mode_config.dpms_property = prop; - - prop = drm_property_create(dev, - DRM_MODE_PROP_BLOB | - DRM_MODE_PROP_IMMUTABLE, - "PATH", 0); - if (!prop) - return -ENOMEM; - dev->mode_config.path_property = prop; - - prop = drm_property_create(dev, - DRM_MODE_PROP_BLOB | - DRM_MODE_PROP_IMMUTABLE, - "TILE", 0); - if (!prop) - return -ENOMEM; - dev->mode_config.tile_property = prop; + ret = drm_connector_create_standard_properties(dev); + if (ret) + return ret; prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "type", drm_plane_type_enum_list, @@ -1695,250 +668,6 @@ static int drm_mode_create_standard_properties(struct drm_device *dev) } /** - * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties - * @dev: DRM device - * - * Called by a driver the first time a DVI-I connector is made. 
- */ -int drm_mode_create_dvi_i_properties(struct drm_device *dev) -{ - struct drm_property *dvi_i_selector; - struct drm_property *dvi_i_subconnector; - - if (dev->mode_config.dvi_i_select_subconnector_property) - return 0; - - dvi_i_selector = - drm_property_create_enum(dev, 0, - "select subconnector", - drm_dvi_i_select_enum_list, - ARRAY_SIZE(drm_dvi_i_select_enum_list)); - dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; - - dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, - "subconnector", - drm_dvi_i_subconnector_enum_list, - ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); - dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; - - return 0; -} -EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); - -/** - * drm_create_tv_properties - create TV specific connector properties - * @dev: DRM device - * @num_modes: number of different TV formats (modes) supported - * @modes: array of pointers to strings containing name of each format - * - * Called by a driver's TV initialization routine, this function creates - * the TV specific connector properties for a given device. Caller is - * responsible for allocating a list of format names and passing them to - * this routine. - */ -int drm_mode_create_tv_properties(struct drm_device *dev, - unsigned int num_modes, - const char * const modes[]) -{ - struct drm_property *tv_selector; - struct drm_property *tv_subconnector; - unsigned int i; - - if (dev->mode_config.tv_select_subconnector_property) - return 0; - - /* - * Basic connector properties - */ - tv_selector = drm_property_create_enum(dev, 0, - "select subconnector", - drm_tv_select_enum_list, - ARRAY_SIZE(drm_tv_select_enum_list)); - if (!tv_selector) - goto nomem; - - dev->mode_config.tv_select_subconnector_property = tv_selector; - - tv_subconnector = - drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, - "subconnector", - drm_tv_subconnector_enum_list, - ARRAY_SIZE(drm_tv_subconnector_enum_list)); - if (!tv_subconnector) - goto nomem; - dev->mode_config.tv_subconnector_property = tv_subconnector; - - /* - * Other, TV specific properties: margins & TV modes. 
- */ - dev->mode_config.tv_left_margin_property = - drm_property_create_range(dev, 0, "left margin", 0, 100); - if (!dev->mode_config.tv_left_margin_property) - goto nomem; - - dev->mode_config.tv_right_margin_property = - drm_property_create_range(dev, 0, "right margin", 0, 100); - if (!dev->mode_config.tv_right_margin_property) - goto nomem; - - dev->mode_config.tv_top_margin_property = - drm_property_create_range(dev, 0, "top margin", 0, 100); - if (!dev->mode_config.tv_top_margin_property) - goto nomem; - - dev->mode_config.tv_bottom_margin_property = - drm_property_create_range(dev, 0, "bottom margin", 0, 100); - if (!dev->mode_config.tv_bottom_margin_property) - goto nomem; - - dev->mode_config.tv_mode_property = - drm_property_create(dev, DRM_MODE_PROP_ENUM, - "mode", num_modes); - if (!dev->mode_config.tv_mode_property) - goto nomem; - - for (i = 0; i < num_modes; i++) - drm_property_add_enum(dev->mode_config.tv_mode_property, i, - i, modes[i]); - - dev->mode_config.tv_brightness_property = - drm_property_create_range(dev, 0, "brightness", 0, 100); - if (!dev->mode_config.tv_brightness_property) - goto nomem; - - dev->mode_config.tv_contrast_property = - drm_property_create_range(dev, 0, "contrast", 0, 100); - if (!dev->mode_config.tv_contrast_property) - goto nomem; - - dev->mode_config.tv_flicker_reduction_property = - drm_property_create_range(dev, 0, "flicker reduction", 0, 100); - if (!dev->mode_config.tv_flicker_reduction_property) - goto nomem; - - dev->mode_config.tv_overscan_property = - drm_property_create_range(dev, 0, "overscan", 0, 100); - if (!dev->mode_config.tv_overscan_property) - goto nomem; - - dev->mode_config.tv_saturation_property = - drm_property_create_range(dev, 0, "saturation", 0, 100); - if (!dev->mode_config.tv_saturation_property) - goto nomem; - - dev->mode_config.tv_hue_property = - drm_property_create_range(dev, 0, "hue", 0, 100); - if (!dev->mode_config.tv_hue_property) - goto nomem; - - return 0; -nomem: - return -ENOMEM; -} -EXPORT_SYMBOL(drm_mode_create_tv_properties); - -/** - * drm_mode_create_scaling_mode_property - create scaling mode property - * @dev: DRM device - * - * Called by a driver the first time it's needed, must be attached to desired - * connectors. - */ -int drm_mode_create_scaling_mode_property(struct drm_device *dev) -{ - struct drm_property *scaling_mode; - - if (dev->mode_config.scaling_mode_property) - return 0; - - scaling_mode = - drm_property_create_enum(dev, 0, "scaling mode", - drm_scaling_mode_enum_list, - ARRAY_SIZE(drm_scaling_mode_enum_list)); - - dev->mode_config.scaling_mode_property = scaling_mode; - - return 0; -} -EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); - -/** - * drm_mode_create_aspect_ratio_property - create aspect ratio property - * @dev: DRM device - * - * Called by a driver the first time it's needed, must be attached to desired - * connectors. - * - * Returns: - * Zero on success, negative errno on failure. 
- */ -int drm_mode_create_aspect_ratio_property(struct drm_device *dev) -{ - if (dev->mode_config.aspect_ratio_property) - return 0; - - dev->mode_config.aspect_ratio_property = - drm_property_create_enum(dev, 0, "aspect ratio", - drm_aspect_ratio_enum_list, - ARRAY_SIZE(drm_aspect_ratio_enum_list)); - - if (dev->mode_config.aspect_ratio_property == NULL) - return -ENOMEM; - - return 0; -} -EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); - -/** - * drm_mode_create_dirty_property - create dirty property - * @dev: DRM device - * - * Called by a driver the first time it's needed, must be attached to desired - * connectors. - */ -int drm_mode_create_dirty_info_property(struct drm_device *dev) -{ - struct drm_property *dirty_info; - - if (dev->mode_config.dirty_info_property) - return 0; - - dirty_info = - drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, - "dirty", - drm_dirty_info_enum_list, - ARRAY_SIZE(drm_dirty_info_enum_list)); - dev->mode_config.dirty_info_property = dirty_info; - - return 0; -} -EXPORT_SYMBOL(drm_mode_create_dirty_info_property); - -/** - * drm_mode_create_suggested_offset_properties - create suggests offset properties - * @dev: DRM device - * - * Create the the suggested x/y offset property for connectors. - */ -int drm_mode_create_suggested_offset_properties(struct drm_device *dev) -{ - if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property) - return 0; - - dev->mode_config.suggested_x_property = - drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff); - - dev->mode_config.suggested_y_property = - drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff); - - if (dev->mode_config.suggested_x_property == NULL || - dev->mode_config.suggested_y_property == NULL) - return -ENOMEM; - return 0; -} -EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties); - -/** * drm_mode_getresources - get graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl @@ -2125,263 +854,6 @@ int drm_mode_getcrtc(struct drm_device *dev, return 0; } -static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, - const struct drm_file *file_priv) -{ - /* - * If user-space hasn't configured the driver to expose the stereo 3D - * modes, don't expose them. - */ - if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) - return false; - - return true; -} - -static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector) -{ - /* For atomic drivers only state objects are synchronously updated and - * protected by modeset locks, so check those first. 
*/ - if (connector->state) - return connector->state->best_encoder; - return connector->encoder; -} - -/* helper for getconnector and getproperties ioctls */ -static int get_properties(struct drm_mode_object *obj, bool atomic, - uint32_t __user *prop_ptr, uint64_t __user *prop_values, - uint32_t *arg_count_props) -{ - int props_count; - int i, ret, copied; - - props_count = obj->properties->count; - if (!atomic) - props_count -= obj->properties->atomic_count; - - if ((*arg_count_props >= props_count) && props_count) { - for (i = 0, copied = 0; copied < props_count; i++) { - struct drm_property *prop = obj->properties->properties[i]; - uint64_t val; - - if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) - continue; - - ret = drm_object_property_get_value(obj, prop, &val); - if (ret) - return ret; - - if (put_user(prop->base.id, prop_ptr + copied)) - return -EFAULT; - - if (put_user(val, prop_values + copied)) - return -EFAULT; - - copied++; - } - } - *arg_count_props = props_count; - - return 0; -} - -/** - * drm_mode_getconnector - get connector configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Construct a connector configuration structure to return to the user. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_getconnector(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_mode_get_connector *out_resp = data; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct drm_display_mode *mode; - int mode_count = 0; - int encoders_count = 0; - int ret = 0; - int copied = 0; - int i; - struct drm_mode_modeinfo u_mode; - struct drm_mode_modeinfo __user *mode_ptr; - uint32_t __user *encoder_ptr; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); - - mutex_lock(&dev->mode_config.mutex); - - connector = drm_connector_lookup(dev, out_resp->connector_id); - if (!connector) { - ret = -ENOENT; - goto out_unlock; - } - - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) - if (connector->encoder_ids[i] != 0) - encoders_count++; - - if (out_resp->count_modes == 0) { - connector->funcs->fill_modes(connector, - dev->mode_config.max_width, - dev->mode_config.max_height); - } - - /* delayed so we get modes regardless of pre-fill_modes state */ - list_for_each_entry(mode, &connector->modes, head) - if (drm_mode_expose_to_userspace(mode, file_priv)) - mode_count++; - - out_resp->connector_id = connector->base.id; - out_resp->connector_type = connector->connector_type; - out_resp->connector_type_id = connector->connector_type_id; - out_resp->mm_width = connector->display_info.width_mm; - out_resp->mm_height = connector->display_info.height_mm; - out_resp->subpixel = connector->display_info.subpixel_order; - out_resp->connection = connector->status; - - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - encoder = drm_connector_get_encoder(connector); - if (encoder) - out_resp->encoder_id = encoder->base.id; - else - out_resp->encoder_id = 0; - - /* - * This ioctl is called twice, once to determine how much space is - * needed, and the 2nd time to fill it. 
- */ - if ((out_resp->count_modes >= mode_count) && mode_count) { - copied = 0; - mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; - list_for_each_entry(mode, &connector->modes, head) { - if (!drm_mode_expose_to_userspace(mode, file_priv)) - continue; - - drm_mode_convert_to_umode(&u_mode, mode); - if (copy_to_user(mode_ptr + copied, - &u_mode, sizeof(u_mode))) { - ret = -EFAULT; - goto out; - } - copied++; - } - } - out_resp->count_modes = mode_count; - - ret = get_properties(&connector->base, file_priv->atomic, - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), - (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), - &out_resp->count_props); - if (ret) - goto out; - - if ((out_resp->count_encoders >= encoders_count) && encoders_count) { - copied = 0; - encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] != 0) { - if (put_user(connector->encoder_ids[i], - encoder_ptr + copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } - } - } - out_resp->count_encoders = encoders_count; - -out: - drm_modeset_unlock(&dev->mode_config.connection_mutex); - - drm_connector_unreference(connector); -out_unlock: - mutex_unlock(&dev->mode_config.mutex); - - return ret; -} - -static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) -{ - struct drm_connector *connector; - struct drm_device *dev = encoder->dev; - bool uses_atomic = false; - - /* For atomic drivers only state objects are synchronously updated and - * protected by modeset locks, so check those first. */ - drm_for_each_connector(connector, dev) { - if (!connector->state) - continue; - - uses_atomic = true; - - if (connector->state->best_encoder != encoder) - continue; - - return connector->state->crtc; - } - - /* Don't return stale data (e.g. pending async disable). */ - if (uses_atomic) - return NULL; - - return encoder->crtc; -} - -/** - * drm_mode_getencoder - get encoder configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Construct a encoder configuration structure to return to the user. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_getencoder(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_mode_get_encoder *enc_resp = data; - struct drm_encoder *encoder; - struct drm_crtc *crtc; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - encoder = drm_encoder_find(dev, enc_resp->encoder_id); - if (!encoder) - return -ENOENT; - - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - crtc = drm_encoder_get_crtc(encoder); - if (crtc) - enc_resp->crtc_id = crtc->base.id; - else - enc_resp->crtc_id = 0; - drm_modeset_unlock(&dev->mode_config.connection_mutex); - - enc_resp->encoder_type = encoder->encoder_type; - enc_resp->encoder_id = encoder->base.id; - enc_resp->possible_crtcs = encoder->possible_crtcs; - enc_resp->possible_clones = encoder->possible_clones; - - return 0; -} - /** * drm_mode_getplane_res - enumerate all plane resources * @dev: DRM device @@ -2594,8 +1066,9 @@ static int __setplane_internal(struct drm_plane *plane, /* Check whether this plane supports the fb pixel format. 
*/ ret = drm_plane_check_pixel_format(plane, fb->pixel_format); if (ret) { - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->pixel_format)); + char *format_name = drm_get_format_name(fb->pixel_format); + DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name); + kfree(format_name); goto out; } @@ -2804,8 +1277,8 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc, drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); if (crtc->state && - crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) | - BIT(DRM_ROTATE_270))) + crtc->primary->state->rotation & (DRM_ROTATE_90 | + DRM_ROTATE_270)) swap(hdisplay, vdisplay); return check_src_coords(x << 16, y << 16, @@ -2904,8 +1377,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, ret = drm_plane_check_pixel_format(crtc->primary, fb->pixel_format); if (ret) { - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->pixel_format)); + char *format_name = drm_get_format_name(fb->pixel_format); + DRM_DEBUG_KMS("Invalid pixel format %s\n", format_name); + kfree(format_name); goto out; } } @@ -3042,7 +1516,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = internal_framebuffer_create(dev, &fbreq, file_priv); + fb = drm_internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); @@ -3197,1802 +1671,9 @@ int drm_mode_cursor2_ioctl(struct drm_device *dev, return drm_mode_cursor_common(dev, req, file_priv); } -/** - * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description - * @bpp: bits per pixels - * @depth: bit depth per pixel - * - * Computes a drm fourcc pixel format code for the given @bpp/@depth values. - * Useful in fbdev emulation code, since that deals in those values. - */ -uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) -{ - uint32_t fmt; - - switch (bpp) { - case 8: - fmt = DRM_FORMAT_C8; - break; - case 16: - if (depth == 15) - fmt = DRM_FORMAT_XRGB1555; - else - fmt = DRM_FORMAT_RGB565; - break; - case 24: - fmt = DRM_FORMAT_RGB888; - break; - case 32: - if (depth == 24) - fmt = DRM_FORMAT_XRGB8888; - else if (depth == 30) - fmt = DRM_FORMAT_XRGB2101010; - else - fmt = DRM_FORMAT_ARGB8888; - break; - default: - DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n"); - fmt = DRM_FORMAT_XRGB8888; - break; - } - - return fmt; -} -EXPORT_SYMBOL(drm_mode_legacy_fb_format); - -/** - * drm_mode_addfb - add an FB to the graphics configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Add a new FB to the specified CRTC, given a user request. This is the - * original addfb ioctl which only supported RGB formats. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
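
The two DRM_DEBUG_KMS hunks above change drm_get_format_name() into an allocating interface, so every caller now owns the returned string and must kfree() it. The same calling convention in isolation, as a hypothetical debug helper (the header choice is indicative):

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void foo_debug_fb_format(struct drm_framebuffer *fb)
{
        /* drm_get_format_name() now returns an allocated string. */
        char *format_name = drm_get_format_name(fb->pixel_format);

        DRM_DEBUG_KMS("[FB:%d] uses format %s\n", fb->base.id, format_name);
        kfree(format_name);
}
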
- */ -int drm_mode_addfb(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_fb_cmd *or = data; - struct drm_mode_fb_cmd2 r = {}; - int ret; - - /* convert to new format and call new ioctl */ - r.fb_id = or->fb_id; - r.width = or->width; - r.height = or->height; - r.pitches[0] = or->pitch; - r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); - r.handles[0] = or->handle; - - ret = drm_mode_addfb2(dev, &r, file_priv); - if (ret) - return ret; - - or->fb_id = r.fb_id; - - return 0; -} - -static int format_check(const struct drm_mode_fb_cmd2 *r) -{ - uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN; - - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB332: - case DRM_FORMAT_BGR233: - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_XBGR4444: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_BGRX4444: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_ABGR4444: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_BGRA4444: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_BGR565: - case DRM_FORMAT_RGB888: - case DRM_FORMAT_BGR888: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_BGRX8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_RGBX1010102: - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_BGRA1010102: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_AYUV: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 0; - default: - DRM_DEBUG_KMS("invalid pixel format %s\n", - drm_get_format_name(r->pixel_format)); - return -EINVAL; - } -} - -static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) -{ - int ret, hsub, vsub, num_planes, i; - - ret = format_check(r); - if (ret) { - DRM_DEBUG_KMS("bad framebuffer format %s\n", - drm_get_format_name(r->pixel_format)); - return ret; - } - - hsub = drm_format_horz_chroma_subsampling(r->pixel_format); - vsub = drm_format_vert_chroma_subsampling(r->pixel_format); - num_planes = drm_format_num_planes(r->pixel_format); - - if (r->width == 0 || r->width % hsub) { - DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); - return -EINVAL; - } - - if (r->height == 0 || r->height % vsub) { - DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); - return -EINVAL; - } - - for (i = 0; i < num_planes; i++) { - unsigned int width = r->width / (i != 0 ? hsub : 1); - unsigned int height = r->height / (i != 0 ? 
vsub : 1); - unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); - - if (!r->handles[i]) { - DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); - return -EINVAL; - } - - if ((uint64_t) width * cpp > UINT_MAX) - return -ERANGE; - - if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) - return -ERANGE; - - if (r->pitches[i] < width * cpp) { - DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); - return -EINVAL; - } - - if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { - DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", - r->modifier[i], i); - return -EINVAL; - } - - /* modifier specific checks: */ - switch (r->modifier[i]) { - case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: - /* NOTE: the pitch restriction may be lifted later if it turns - * out that no hw has this restriction: - */ - if (r->pixel_format != DRM_FORMAT_NV12 || - width % 128 || height % 32 || - r->pitches[i] % 128) { - DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); - return -EINVAL; - } - break; - - default: - break; - } - } - - for (i = num_planes; i < 4; i++) { - if (r->modifier[i]) { - DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i); - return -EINVAL; - } - - /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. */ - if (!(r->flags & DRM_MODE_FB_MODIFIERS)) - continue; - - if (r->handles[i]) { - DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i); - return -EINVAL; - } - - if (r->pitches[i]) { - DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i); - return -EINVAL; - } - - if (r->offsets[i]) { - DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i); - return -EINVAL; - } - } - - return 0; -} - -static struct drm_framebuffer * -internal_framebuffer_create(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) -{ - struct drm_mode_config *config = &dev->mode_config; - struct drm_framebuffer *fb; - int ret; - - if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { - DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); - return ERR_PTR(-EINVAL); - } - - if ((config->min_width > r->width) || (r->width > config->max_width)) { - DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", - r->width, config->min_width, config->max_width); - return ERR_PTR(-EINVAL); - } - if ((config->min_height > r->height) || (r->height > config->max_height)) { - DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", - r->height, config->min_height, config->max_height); - return ERR_PTR(-EINVAL); - } - - if (r->flags & DRM_MODE_FB_MODIFIERS && - !dev->mode_config.allow_fb_modifiers) { - DRM_DEBUG_KMS("driver does not support fb modifiers\n"); - return ERR_PTR(-EINVAL); - } - - ret = framebuffer_check(r); - if (ret) - return ERR_PTR(ret); - - fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); - if (IS_ERR(fb)) { - DRM_DEBUG_KMS("could not create framebuffer\n"); - return fb; - } - - return fb; -} - -/** - * drm_mode_addfb2 - add an FB to the graphics configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Add a new FB to the specified CRTC, given a user request with format. This is - * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers - * and uses fourcc codes as pixel format specifiers. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
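
framebuffer_check() above is effectively the contract for ADDFB2 requests: a recognised fourcc, a buffer handle for every used plane, pitches of at least width times the per-plane cpp, sizes and offsets that fit, and fully zeroed unused planes. A userspace sketch of a request that satisfies those rules for a single-plane XRGB8888 buffer; the helper name is illustrative and header paths assume the kernel uapi headers:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>

static int add_xrgb8888_fb(int fd, uint32_t width, uint32_t height,
                           uint32_t bo_handle, uint32_t pitch,
                           uint32_t *fb_id)
{
        struct drm_mode_fb_cmd2 req;

        memset(&req, 0, sizeof(req));   /* unused planes must stay zero */
        req.width = width;
        req.height = height;
        req.pixel_format = DRM_FORMAT_XRGB8888;
        req.handles[0] = bo_handle;
        req.pitches[0] = pitch;         /* must be >= width * 4 for this format */

        if (ioctl(fd, DRM_IOCTL_MODE_ADDFB2, &req))
                return -1;

        *fb_id = req.fb_id;
        return 0;
}
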
- */ -int drm_mode_addfb2(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_fb_cmd2 *r = data; - struct drm_framebuffer *fb; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - fb = internal_framebuffer_create(dev, r, file_priv); - if (IS_ERR(fb)) - return PTR_ERR(fb); - - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - r->fb_id = fb->base.id; - - /* Transfer ownership to the filp for reaping on close */ - mutex_lock(&file_priv->fbs_lock); - list_add(&fb->filp_head, &file_priv->fbs); - mutex_unlock(&file_priv->fbs_lock); - - return 0; -} - -struct drm_mode_rmfb_work { - struct work_struct work; - struct list_head fbs; -}; - -static void drm_mode_rmfb_work_fn(struct work_struct *w) -{ - struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work); - - while (!list_empty(&arg->fbs)) { - struct drm_framebuffer *fb = - list_first_entry(&arg->fbs, typeof(*fb), filp_head); - - list_del_init(&fb->filp_head); - drm_framebuffer_remove(fb); - } -} - -/** - * drm_mode_rmfb - remove an FB from the configuration - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Remove the FB specified by the user. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_rmfb(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_framebuffer *fb = NULL; - struct drm_framebuffer *fbl = NULL; - uint32_t *id = data; - int found = 0; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - fb = drm_framebuffer_lookup(dev, *id); - if (!fb) - return -ENOENT; - - mutex_lock(&file_priv->fbs_lock); - list_for_each_entry(fbl, &file_priv->fbs, filp_head) - if (fb == fbl) - found = 1; - if (!found) { - mutex_unlock(&file_priv->fbs_lock); - goto fail_unref; - } - - list_del_init(&fb->filp_head); - mutex_unlock(&file_priv->fbs_lock); - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_unreference(fb); - - /* - * we now own the reference that was stored in the fbs list - * - * drm_framebuffer_remove may fail with -EINTR on pending signals, - * so run this in a separate stack as there's no way to correctly - * handle this after the fb is already removed from the lookup table. - */ - if (drm_framebuffer_read_refcount(fb) > 1) { - struct drm_mode_rmfb_work arg; - - INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); - INIT_LIST_HEAD(&arg.fbs); - list_add_tail(&fb->filp_head, &arg.fbs); - - schedule_work(&arg.work); - flush_work(&arg.work); - destroy_work_on_stack(&arg.work); - } else - drm_framebuffer_unreference(fb); - - return 0; - -fail_unref: - drm_framebuffer_unreference(fb); - return -ENOENT; -} - -/** - * drm_mode_getfb - get FB info - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Lookup the FB given its ID and return info about it. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
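
The rmfb path above bounces the final drm_framebuffer_remove() to a workqueue because that call can fail with -EINTR on pending signals, which cannot be handled once the framebuffer is already gone from the lookup table. The on-stack work pattern it relies on, reduced to a generic sketch (foo_object and foo_object_release are placeholders for "some object with a blocking release"):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_object;
void foo_object_release(struct foo_object *obj);        /* placeholder, may block */

struct foo_cleanup_work {
        struct work_struct work;
        struct foo_object *obj;
};

static void foo_cleanup_fn(struct work_struct *w)
{
        struct foo_cleanup_work *arg = container_of(w, typeof(*arg), work);

        foo_object_release(arg->obj);
}

static void foo_cleanup_sync(struct foo_object *obj)
{
        struct foo_cleanup_work arg = { .obj = obj };

        /* The work item lives on this stack frame, so pair
         * INIT_WORK_ONSTACK() with destroy_work_on_stack() once the
         * synchronous flush_work() has returned. */
        INIT_WORK_ONSTACK(&arg.work, foo_cleanup_fn);
        schedule_work(&arg.work);
        flush_work(&arg.work);
        destroy_work_on_stack(&arg.work);
}
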
- */ -int drm_mode_getfb(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_fb_cmd *r = data; - struct drm_framebuffer *fb; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - fb = drm_framebuffer_lookup(dev, r->fb_id); - if (!fb) - return -ENOENT; - - r->height = fb->height; - r->width = fb->width; - r->depth = fb->depth; - r->bpp = fb->bits_per_pixel; - r->pitch = fb->pitches[0]; - if (fb->funcs->create_handle) { - if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || - drm_is_control_client(file_priv)) { - ret = fb->funcs->create_handle(fb, file_priv, - &r->handle); - } else { - /* GET_FB() is an unprivileged ioctl so we must not - * return a buffer-handle to non-master processes! For - * backwards-compatibility reasons, we cannot make - * GET_FB() privileged, so just return an invalid handle - * for non-masters. */ - r->handle = 0; - ret = 0; - } - } else { - ret = -ENODEV; - } - - drm_framebuffer_unreference(fb); - - return ret; -} - -/** - * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB - * @dev: drm device for the ioctl - * @data: data pointer for the ioctl - * @file_priv: drm file for the ioctl call - * - * Lookup the FB and flush out the damaged area supplied by userspace as a clip - * rectangle list. Generic userspace which does frontbuffer rendering must call - * this ioctl to flush out the changes on manual-update display outputs, e.g. - * usb display-link, mipi manual update panels or edp panel self refresh modes. - * - * Modesetting drivers which always update the frontbuffer do not need to - * implement the corresponding ->dirty framebuffer callback. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_dirtyfb_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_clip_rect __user *clips_ptr; - struct drm_clip_rect *clips = NULL; - struct drm_mode_fb_dirty_cmd *r = data; - struct drm_framebuffer *fb; - unsigned flags; - int num_clips; - int ret; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - fb = drm_framebuffer_lookup(dev, r->fb_id); - if (!fb) - return -ENOENT; - - num_clips = r->num_clips; - clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; - - if (!num_clips != !clips_ptr) { - ret = -EINVAL; - goto out_err1; - } - - flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; - - /* If userspace annotates copy, clips must come in pairs */ - if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { - ret = -EINVAL; - goto out_err1; - } - - if (num_clips && clips_ptr) { - if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { - ret = -EINVAL; - goto out_err1; - } - clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); - if (!clips) { - ret = -ENOMEM; - goto out_err1; - } - - ret = copy_from_user(clips, clips_ptr, - num_clips * sizeof(*clips)); - if (ret) { - ret = -EFAULT; - goto out_err2; - } - } - - if (fb->funcs->dirty) { - ret = fb->funcs->dirty(fb, file_priv, flags, r->color, - clips, num_clips); - } else { - ret = -ENOSYS; - } - -out_err2: - kfree(clips); -out_err1: - drm_framebuffer_unreference(fb); - - return ret; -} - -/** - * drm_fb_release - remove and free the FBs on this file - * @priv: drm file for the ioctl - * - * Destroy all the FBs associated with @filp. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
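
Generic frontbuffer-rendering clients drive the dirty ioctl documented above with a small clip list. A minimal userspace sketch flushing one damaged rectangle, without the annotate-copy flags:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int flush_damage(int fd, uint32_t fb_id,
                        uint16_t x1, uint16_t y1, uint16_t x2, uint16_t y2)
{
        struct drm_clip_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
        struct drm_mode_fb_dirty_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.fb_id = fb_id;
        cmd.num_clips = 1;
        cmd.clips_ptr = (uintptr_t)&clip;

        /* Fails with errno == ENOSYS if the driver has no ->dirty hook,
         * i.e. the output does not need explicit flushing. */
        return ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd) ? -1 : 0;
}
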
- */ -void drm_fb_release(struct drm_file *priv) -{ - struct drm_framebuffer *fb, *tfb; - struct drm_mode_rmfb_work arg; - - INIT_LIST_HEAD(&arg.fbs); - - /* - * When the file gets released that means no one else can access the fb - * list any more, so no need to grab fpriv->fbs_lock. And we need to - * avoid upsetting lockdep since the universal cursor code adds a - * framebuffer while holding mutex locks. - * - * Note that a real deadlock between fpriv->fbs_lock and the modeset - * locks is impossible here since no one else but this function can get - * at it any more. - */ - list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { - if (drm_framebuffer_read_refcount(fb) > 1) { - list_move_tail(&fb->filp_head, &arg.fbs); - } else { - list_del_init(&fb->filp_head); - - /* This drops the fpriv->fbs reference. */ - drm_framebuffer_unreference(fb); - } - } - - if (!list_empty(&arg.fbs)) { - INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); - - schedule_work(&arg.work); - flush_work(&arg.work); - destroy_work_on_stack(&arg.work); - } -} - -static bool drm_property_type_valid(struct drm_property *property) -{ - if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) - return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE); - return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE); -} - -/** - * drm_property_create - create a new property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @num_values: number of pre-defined values - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * Note that the DRM core keeps a per-device list of properties and that, if - * drm_mode_config_cleanup() is called, it will destroy all properties created - * by the driver. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create(struct drm_device *dev, int flags, - const char *name, int num_values) -{ - struct drm_property *property = NULL; - int ret; - - property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); - if (!property) - return NULL; - - property->dev = dev; - - if (num_values) { - property->values = kcalloc(num_values, sizeof(uint64_t), - GFP_KERNEL); - if (!property->values) - goto fail; - } - - ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); - if (ret) - goto fail; - - property->flags = flags; - property->num_values = num_values; - INIT_LIST_HEAD(&property->enum_list); - - if (name) { - strncpy(property->name, name, DRM_PROP_NAME_LEN); - property->name[DRM_PROP_NAME_LEN-1] = '\0'; - } - - list_add_tail(&property->head, &dev->mode_config.property_list); - - WARN_ON(!drm_property_type_valid(property)); - - return property; -fail: - kfree(property->values); - kfree(property); - return NULL; -} -EXPORT_SYMBOL(drm_property_create); - -/** - * drm_property_create_enum - create a new enumeration property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @props: enumeration lists with property values - * @num_values: number of pre-defined values - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. 
- * - * Userspace is only allowed to set one of the predefined values for enumeration - * properties. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, - const char *name, - const struct drm_prop_enum_list *props, - int num_values) -{ - struct drm_property *property; - int i, ret; - - flags |= DRM_MODE_PROP_ENUM; - - property = drm_property_create(dev, flags, name, num_values); - if (!property) - return NULL; - - for (i = 0; i < num_values; i++) { - ret = drm_property_add_enum(property, i, - props[i].type, - props[i].name); - if (ret) { - drm_property_destroy(dev, property); - return NULL; - } - } - - return property; -} -EXPORT_SYMBOL(drm_property_create_enum); - -/** - * drm_property_create_bitmask - create a new bitmask property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @props: enumeration lists with property bitflags - * @num_props: size of the @props array - * @supported_bits: bitmask of all supported enumeration values - * - * This creates a new bitmask drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * Compared to plain enumeration properties userspace is allowed to set any - * or'ed together combination of the predefined property bitflag values - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create_bitmask(struct drm_device *dev, - int flags, const char *name, - const struct drm_prop_enum_list *props, - int num_props, - uint64_t supported_bits) -{ - struct drm_property *property; - int i, ret, index = 0; - int num_values = hweight64(supported_bits); - - flags |= DRM_MODE_PROP_BITMASK; - - property = drm_property_create(dev, flags, name, num_values); - if (!property) - return NULL; - for (i = 0; i < num_props; i++) { - if (!(supported_bits & (1ULL << props[i].type))) - continue; - - if (WARN_ON(index >= num_values)) { - drm_property_destroy(dev, property); - return NULL; - } - - ret = drm_property_add_enum(property, index++, - props[i].type, - props[i].name); - if (ret) { - drm_property_destroy(dev, property); - return NULL; - } - } - - return property; -} -EXPORT_SYMBOL(drm_property_create_bitmask); - -static struct drm_property *property_create_range(struct drm_device *dev, - int flags, const char *name, - uint64_t min, uint64_t max) -{ - struct drm_property *property; - - property = drm_property_create(dev, flags, name, 2); - if (!property) - return NULL; - - property->values[0] = min; - property->values[1] = max; - - return property; -} - -/** - * drm_property_create_range - create a new unsigned ranged property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @min: minimum value of the property - * @max: maximum value of the property - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * Userspace is allowed to set any unsigned integer value in the (min, max) - * range inclusive. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. 
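
The enum helper documented above is normally consumed in a driver's property setup; a hypothetical example creating and attaching a made-up "dithering mode" connector property (the foo_ names and values are invented for illustration):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static const struct drm_prop_enum_list foo_dither_modes[] = {
        { 0, "off" },
        { 1, "static" },
        { 2, "dynamic" },
};

static int foo_attach_dither_property(struct drm_connector *connector)
{
        struct drm_property *prop;

        prop = drm_property_create_enum(connector->dev, 0, "dithering mode",
                                        foo_dither_modes,
                                        ARRAY_SIZE(foo_dither_modes));
        if (!prop)
                return -ENOMEM;

        /* Expose the property on the connector, defaulting to "off". */
        drm_object_attach_property(&connector->base, prop, 0);
        return 0;
}
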
- */ -struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, - const char *name, - uint64_t min, uint64_t max) -{ - return property_create_range(dev, DRM_MODE_PROP_RANGE | flags, - name, min, max); -} -EXPORT_SYMBOL(drm_property_create_range); - -/** - * drm_property_create_signed_range - create a new signed ranged property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @min: minimum value of the property - * @max: maximum value of the property - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * Userspace is allowed to set any signed integer value in the (min, max) - * range inclusive. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create_signed_range(struct drm_device *dev, - int flags, const char *name, - int64_t min, int64_t max) -{ - return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags, - name, I642U64(min), I642U64(max)); -} -EXPORT_SYMBOL(drm_property_create_signed_range); - -/** - * drm_property_create_object - create a new object property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * @type: object type from DRM_MODE_OBJECT_* defines - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * Userspace is only allowed to set this to any property value of the given - * @type. Only useful for atomic properties, which is enforced. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create_object(struct drm_device *dev, - int flags, const char *name, uint32_t type) -{ - struct drm_property *property; - - flags |= DRM_MODE_PROP_OBJECT; - - if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC))) - return NULL; - - property = drm_property_create(dev, flags, name, 1); - if (!property) - return NULL; - - property->values[0] = type; - - return property; -} -EXPORT_SYMBOL(drm_property_create_object); - -/** - * drm_property_create_bool - create a new boolean property type - * @dev: drm device - * @flags: flags specifying the property type - * @name: name of the property - * - * This creates a new generic drm property which can then be attached to a drm - * object with drm_object_attach_property. The returned property object must be - * freed with drm_property_destroy. - * - * This is implemented as a ranged property with only {0, 1} as valid values. - * - * Returns: - * A pointer to the newly created property on success, NULL on failure. - */ -struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, - const char *name) -{ - return drm_property_create_range(dev, flags, name, 0, 1); -} -EXPORT_SYMBOL(drm_property_create_bool); - -/** - * drm_property_add_enum - add a possible value to an enumeration property - * @property: enumeration property to change - * @index: index of the new enumeration - * @value: value of the new enumeration - * @name: symbolic name of the new enumeration - * - * This functions adds enumerations to a property. 
- * - * It's use is deprecated, drivers should use one of the more specific helpers - * to directly create the property with all enumerations already attached. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_property_add_enum(struct drm_property *property, int index, - uint64_t value, const char *name) -{ - struct drm_property_enum *prop_enum; - - if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) || - drm_property_type_is(property, DRM_MODE_PROP_BITMASK))) - return -EINVAL; - - /* - * Bitmask enum properties have the additional constraint of values - * from 0 to 63 - */ - if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) && - (value > 63)) - return -EINVAL; - - if (!list_empty(&property->enum_list)) { - list_for_each_entry(prop_enum, &property->enum_list, head) { - if (prop_enum->value == value) { - strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); - prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; - return 0; - } - } - } - - prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); - if (!prop_enum) - return -ENOMEM; - - strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); - prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; - prop_enum->value = value; - - property->values[index] = value; - list_add_tail(&prop_enum->head, &property->enum_list); - return 0; -} -EXPORT_SYMBOL(drm_property_add_enum); - -/** - * drm_property_destroy - destroy a drm property - * @dev: drm device - * @property: property to destry - * - * This function frees a property including any attached resources like - * enumeration values. - */ -void drm_property_destroy(struct drm_device *dev, struct drm_property *property) -{ - struct drm_property_enum *prop_enum, *pt; - - list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) { - list_del(&prop_enum->head); - kfree(prop_enum); - } - - if (property->num_values) - kfree(property->values); - drm_mode_object_unregister(dev, &property->base); - list_del(&property->head); - kfree(property); -} -EXPORT_SYMBOL(drm_property_destroy); - -/** - * drm_object_attach_property - attach a property to a modeset object - * @obj: drm modeset object - * @property: property to attach - * @init_val: initial value of the property - * - * This attaches the given property to the modeset object with the given initial - * value. Currently this function cannot fail since the properties are stored in - * a statically sized array. - */ -void drm_object_attach_property(struct drm_mode_object *obj, - struct drm_property *property, - uint64_t init_val) -{ - int count = obj->properties->count; - - if (count == DRM_OBJECT_MAX_PROPERTY) { - WARN(1, "Failed to attach object property (type: 0x%x). Please " - "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time " - "you see this message on the same object type.\n", - obj->type); - return; - } - - obj->properties->properties[count] = property; - obj->properties->values[count] = init_val; - obj->properties->count++; - if (property->flags & DRM_MODE_PROP_ATOMIC) - obj->properties->atomic_count++; -} -EXPORT_SYMBOL(drm_object_attach_property); - -/** - * drm_object_property_set_value - set the value of a property - * @obj: drm mode object to set property value for - * @property: property to set - * @val: value the property should be set to - * - * This functions sets a given property on a given object. This function only - * changes the software state of the property, it does not call into the - * driver's ->set_property callback. - * - * Returns: - * Zero on success, error code on failure. 
- */ -int drm_object_property_set_value(struct drm_mode_object *obj, - struct drm_property *property, uint64_t val) -{ - int i; - - for (i = 0; i < obj->properties->count; i++) { - if (obj->properties->properties[i] == property) { - obj->properties->values[i] = val; - return 0; - } - } - - return -EINVAL; -} -EXPORT_SYMBOL(drm_object_property_set_value); - -/** - * drm_object_property_get_value - retrieve the value of a property - * @obj: drm mode object to get property value from - * @property: property to retrieve - * @val: storage for the property value - * - * This function retrieves the softare state of the given property for the given - * property. Since there is no driver callback to retrieve the current property - * value this might be out of sync with the hardware, depending upon the driver - * and property. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_object_property_get_value(struct drm_mode_object *obj, - struct drm_property *property, uint64_t *val) -{ - int i; - - /* read-only properties bypass atomic mechanism and still store - * their value in obj->properties->values[].. mostly to avoid - * having to deal w/ EDID and similar props in atomic paths: - */ - if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) && - !(property->flags & DRM_MODE_PROP_IMMUTABLE)) - return drm_atomic_get_property(obj, property, val); - - for (i = 0; i < obj->properties->count; i++) { - if (obj->properties->properties[i] == property) { - *val = obj->properties->values[i]; - return 0; - } - } - - return -EINVAL; -} -EXPORT_SYMBOL(drm_object_property_get_value); - -/** - * drm_mode_getproperty_ioctl - get the property metadata - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function retrieves the metadata for a given property, like the different - * possible values for an enum property or the limits for a range property. - * - * Blob properties are special - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
- */ -int drm_mode_getproperty_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_get_property *out_resp = data; - struct drm_property *property; - int enum_count = 0; - int value_count = 0; - int ret = 0, i; - int copied; - struct drm_property_enum *prop_enum; - struct drm_mode_property_enum __user *enum_ptr; - uint64_t __user *values_ptr; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - drm_modeset_lock_all(dev); - property = drm_property_find(dev, out_resp->prop_id); - if (!property) { - ret = -ENOENT; - goto done; - } - - if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || - drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { - list_for_each_entry(prop_enum, &property->enum_list, head) - enum_count++; - } - - value_count = property->num_values; - - strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); - out_resp->name[DRM_PROP_NAME_LEN-1] = 0; - out_resp->flags = property->flags; - - if ((out_resp->count_values >= value_count) && value_count) { - values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr; - for (i = 0; i < value_count; i++) { - if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { - ret = -EFAULT; - goto done; - } - } - } - out_resp->count_values = value_count; - - if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || - drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { - if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { - copied = 0; - enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; - list_for_each_entry(prop_enum, &property->enum_list, head) { - - if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { - ret = -EFAULT; - goto done; - } - - if (copy_to_user(&enum_ptr[copied].name, - &prop_enum->name, DRM_PROP_NAME_LEN)) { - ret = -EFAULT; - goto done; - } - copied++; - } - } - out_resp->count_enum_blobs = enum_count; - } - - /* - * NOTE: The idea seems to have been to use this to read all the blob - * property values. But nothing ever added them to the corresponding - * list, userspace always used the special-purpose get_blob ioctl to - * read the value for a blob property. It also doesn't make a lot of - * sense to return values here when everything else is just metadata for - * the property itself. - */ - if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) - out_resp->count_enum_blobs = 0; -done: - drm_modeset_unlock_all(dev); - return ret; -} - -static void drm_property_free_blob(struct kref *kref) -{ - struct drm_property_blob *blob = - container_of(kref, struct drm_property_blob, base.refcount); - - mutex_lock(&blob->dev->mode_config.blob_lock); - list_del(&blob->head_global); - mutex_unlock(&blob->dev->mode_config.blob_lock); - - drm_mode_object_unregister(blob->dev, &blob->base); - - kfree(blob); -} - -/** - * drm_property_create_blob - Create new blob property - * - * Creates a new blob property for a specified DRM device, optionally - * copying data. - * - * @dev: DRM device to create property for - * @length: Length to allocate for blob data - * @data: If specified, copies data into blob - * - * Returns: - * New blob property with a single reference on success, or an ERR_PTR - * value on failure. 
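
A driver-side sketch of the blob helper documented above: wrap an opaque payload in a blob and publish its id through an object property. The property and payload are hypothetical; note that the blob returned here still carries the single reference created by drm_property_create_blob(), which the driver keeps and later drops with drm_property_unreference_blob() when the data is replaced or torn down, since the attached property only stores the id:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static struct drm_property_blob *
foo_publish_blob(struct drm_crtc *crtc, struct drm_property *prop,
                 const void *payload, size_t len)
{
        struct drm_property_blob *blob;

        blob = drm_property_create_blob(crtc->dev, len, payload);
        if (IS_ERR(blob))
                return blob;

        /* The property value is just the blob id; the returned pointer
         * (and its reference) stays with the driver. */
        drm_object_attach_property(&crtc->base, prop, blob->base.id);

        return blob;
}
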
- */ -struct drm_property_blob * -drm_property_create_blob(struct drm_device *dev, size_t length, - const void *data) -{ - struct drm_property_blob *blob; - int ret; - - if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) - return ERR_PTR(-EINVAL); - - blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); - if (!blob) - return ERR_PTR(-ENOMEM); - - /* This must be explicitly initialised, so we can safely call list_del - * on it in the removal handler, even if it isn't in a file list. */ - INIT_LIST_HEAD(&blob->head_file); - blob->length = length; - blob->dev = dev; - - if (data) - memcpy(blob->data, data, length); - - ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB, - true, drm_property_free_blob); - if (ret) { - kfree(blob); - return ERR_PTR(-EINVAL); - } - - mutex_lock(&dev->mode_config.blob_lock); - list_add_tail(&blob->head_global, - &dev->mode_config.property_blob_list); - mutex_unlock(&dev->mode_config.blob_lock); - - return blob; -} -EXPORT_SYMBOL(drm_property_create_blob); - -/** - * drm_property_unreference_blob - Unreference a blob property - * - * Drop a reference on a blob property. May free the object. - * - * @blob: Pointer to blob property - */ -void drm_property_unreference_blob(struct drm_property_blob *blob) -{ - if (!blob) - return; - - drm_mode_object_unreference(&blob->base); -} -EXPORT_SYMBOL(drm_property_unreference_blob); - -/** - * drm_property_destroy_user_blobs - destroy all blobs created by this client - * @dev: DRM device - * @file_priv: destroy all blobs owned by this file handle - */ -void drm_property_destroy_user_blobs(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_property_blob *blob, *bt; - - /* - * When the file gets released that means no one else can access the - * blob list any more, so no need to grab dev->blob_lock. - */ - list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) { - list_del_init(&blob->head_file); - drm_property_unreference_blob(blob); - } -} - -/** - * drm_property_reference_blob - Take a reference on an existing property - * - * Take a new reference on an existing blob property. - * - * @blob: Pointer to blob property - */ -struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob) -{ - drm_mode_object_reference(&blob->base); - return blob; -} -EXPORT_SYMBOL(drm_property_reference_blob); - -/** - * drm_property_lookup_blob - look up a blob property and take a reference - * @dev: drm device - * @id: id of the blob property - * - * If successful, this takes an additional reference to the blob property. - * callers need to make sure to eventually unreference the returned property - * again, using @drm_property_unreference_blob. 
- */ -struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, - uint32_t id) -{ - struct drm_mode_object *obj; - struct drm_property_blob *blob = NULL; - - obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB); - if (obj) - blob = obj_to_blob(obj); - return blob; -} -EXPORT_SYMBOL(drm_property_lookup_blob); - -/** - * drm_property_replace_global_blob - atomically replace existing blob property - * @dev: drm device - * @replace: location of blob property pointer to be replaced - * @length: length of data for new blob, or 0 for no data - * @data: content for new blob, or NULL for no data - * @obj_holds_id: optional object for property holding blob ID - * @prop_holds_id: optional property holding blob ID - * @return 0 on success or error on failure - * - * This function will atomically replace a global property in the blob list, - * optionally updating a property which holds the ID of that property. It is - * guaranteed to be atomic: no caller will be allowed to see intermediate - * results, and either the entire operation will succeed and clean up the - * previous property, or it will fail and the state will be unchanged. - * - * If length is 0 or data is NULL, no new blob will be created, and the holding - * property, if specified, will be set to 0. - * - * Access to the replace pointer is assumed to be protected by the caller, e.g. - * by holding the relevant modesetting object lock for its parent. - * - * For example, a drm_connector has a 'PATH' property, which contains the ID - * of a blob property with the value of the MST path information. Calling this - * function with replace pointing to the connector's path_blob_ptr, length and - * data set for the new path information, obj_holds_id set to the connector's - * base object, and prop_holds_id set to the path property name, will perform - * a completely atomic update. The access to path_blob_ptr is protected by the - * caller holding a lock on the connector. - */ -static int drm_property_replace_global_blob(struct drm_device *dev, - struct drm_property_blob **replace, - size_t length, - const void *data, - struct drm_mode_object *obj_holds_id, - struct drm_property *prop_holds_id) -{ - struct drm_property_blob *new_blob = NULL; - struct drm_property_blob *old_blob = NULL; - int ret; - - WARN_ON(replace == NULL); - - old_blob = *replace; - - if (length && data) { - new_blob = drm_property_create_blob(dev, length, data); - if (IS_ERR(new_blob)) - return PTR_ERR(new_blob); - } - - /* This does not need to be synchronised with blob_lock, as the - * get_properties ioctl locks all modesetting objects, and - * obj_holds_id must be locked before calling here, so we cannot - * have its value out of sync with the list membership modified - * below under blob_lock. */ - if (obj_holds_id) { - ret = drm_object_property_set_value(obj_holds_id, - prop_holds_id, - new_blob ? - new_blob->base.id : 0); - if (ret != 0) - goto err_created; - } - - drm_property_unreference_blob(old_blob); - *replace = new_blob; - - return 0; - -err_created: - drm_property_unreference_blob(new_blob); - return ret; -} - -/** - * drm_mode_getblob_ioctl - get the contents of a blob property value - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function retrieves the contents of a blob property. The value stored in - * an object's blob property is just a normal modeset object id. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
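
On the userspace side the getblob ioctl described above uses the same two-pass length/data convention as the other KMS getters. A sketch reading a blob property value such as an EDID (minimal error handling, kernel uapi header paths):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *read_blob(int fd, uint32_t blob_id, uint32_t *len_out)
{
        struct drm_mode_get_blob get;
        void *data;

        /* Pass 1: learn the blob length. */
        memset(&get, 0, sizeof(get));
        get.blob_id = blob_id;
        if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get) || !get.length)
                return NULL;

        data = malloc(get.length);
        if (!data)
                return NULL;

        /* Pass 2: same call with a matching length and a destination
         * buffer; the kernel only copies when the lengths agree. */
        get.data = (uintptr_t)data;
        if (ioctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get)) {
                free(data);
                return NULL;
        }

        *len_out = get.length;
        return data;
}
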
- */ -int drm_mode_getblob_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_get_blob *out_resp = data; - struct drm_property_blob *blob; - int ret = 0; - void __user *blob_ptr; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - blob = drm_property_lookup_blob(dev, out_resp->blob_id); - if (!blob) - return -ENOENT; - - if (out_resp->length == blob->length) { - blob_ptr = (void __user *)(unsigned long)out_resp->data; - if (copy_to_user(blob_ptr, blob->data, blob->length)) { - ret = -EFAULT; - goto unref; - } - } - out_resp->length = blob->length; -unref: - drm_property_unreference_blob(blob); - - return ret; -} - -/** - * drm_mode_createblob_ioctl - create a new blob property - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function creates a new blob property with user-defined values. In order - * to give us sensible validation and checking when creating, rather than at - * every potential use, we also require a type to be provided upfront. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_createblob_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_create_blob *out_resp = data; - struct drm_property_blob *blob; - void __user *blob_ptr; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - blob = drm_property_create_blob(dev, out_resp->length, NULL); - if (IS_ERR(blob)) - return PTR_ERR(blob); - - blob_ptr = (void __user *)(unsigned long)out_resp->data; - if (copy_from_user(blob->data, blob_ptr, out_resp->length)) { - ret = -EFAULT; - goto out_blob; - } - - /* Dropping the lock between create_blob and our access here is safe - * as only the same file_priv can remove the blob; at this point, it is - * not associated with any file_priv. */ - mutex_lock(&dev->mode_config.blob_lock); - out_resp->blob_id = blob->base.id; - list_add_tail(&blob->head_file, &file_priv->blobs); - mutex_unlock(&dev->mode_config.blob_lock); - - return 0; - -out_blob: - drm_property_unreference_blob(blob); - return ret; -} - -/** - * drm_mode_destroyblob_ioctl - destroy a user blob property - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * Destroy an existing user-defined blob property. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_destroyblob_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_destroy_blob *out_resp = data; - struct drm_property_blob *blob = NULL, *bt; - bool found = false; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - blob = drm_property_lookup_blob(dev, out_resp->blob_id); - if (!blob) - return -ENOENT; - - mutex_lock(&dev->mode_config.blob_lock); - /* Ensure the property was actually created by this user. */ - list_for_each_entry(bt, &file_priv->blobs, head_file) { - if (bt == blob) { - found = true; - break; - } - } - - if (!found) { - ret = -EPERM; - goto err; - } - - /* We must drop head_file here, because we may not be the last - * reference on the blob. */ - list_del_init(&blob->head_file); - mutex_unlock(&dev->mode_config.blob_lock); - - /* One reference from lookup, and one from the filp. 
*/ - drm_property_unreference_blob(blob); - drm_property_unreference_blob(blob); - - return 0; - -err: - mutex_unlock(&dev->mode_config.blob_lock); - drm_property_unreference_blob(blob); - - return ret; -} - -/** - * drm_mode_connector_set_path_property - set tile property on connector - * @connector: connector to set property on. - * @path: path to use for property; must not be NULL. - * - * This creates a property to expose to userspace to specify a - * connector path. This is mainly used for DisplayPort MST where - * connectors have a topology and we want to allow userspace to give - * them more meaningful names. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_connector_set_path_property(struct drm_connector *connector, - const char *path) -{ - struct drm_device *dev = connector->dev; - int ret; - - ret = drm_property_replace_global_blob(dev, - &connector->path_blob_ptr, - strlen(path) + 1, - path, - &connector->base, - dev->mode_config.path_property); - return ret; -} -EXPORT_SYMBOL(drm_mode_connector_set_path_property); - -/** - * drm_mode_connector_set_tile_property - set tile property on connector - * @connector: connector to set property on. - * - * This looks up the tile information for a connector, and creates a - * property for userspace to parse if it exists. The property is of - * the form of 8 integers using ':' as a separator. - * - * Returns: - * Zero on success, errno on failure. - */ -int drm_mode_connector_set_tile_property(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - char tile[256]; - int ret; - - if (!connector->has_tile) { - ret = drm_property_replace_global_blob(dev, - &connector->tile_blob_ptr, - 0, - NULL, - &connector->base, - dev->mode_config.tile_property); - return ret; - } - - snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d", - connector->tile_group->id, connector->tile_is_single_monitor, - connector->num_h_tile, connector->num_v_tile, - connector->tile_h_loc, connector->tile_v_loc, - connector->tile_h_size, connector->tile_v_size); - - ret = drm_property_replace_global_blob(dev, - &connector->tile_blob_ptr, - strlen(tile) + 1, - tile, - &connector->base, - dev->mode_config.tile_property); - return ret; -} -EXPORT_SYMBOL(drm_mode_connector_set_tile_property); - -/** - * drm_mode_connector_update_edid_property - update the edid property of a connector - * @connector: drm connector - * @edid: new value of the edid property - * - * This function creates a new blob modeset object and assigns its id to the - * connector's edid property. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_connector_update_edid_property(struct drm_connector *connector, - const struct edid *edid) -{ - struct drm_device *dev = connector->dev; - size_t size = 0; - int ret; - - /* ignore requests to set edid when overridden */ - if (connector->override_edid) - return 0; - - if (edid) - size = EDID_LENGTH * (1 + edid->extensions); - - ret = drm_property_replace_global_blob(dev, - &connector->edid_blob_ptr, - size, - edid, - &connector->base, - dev->mode_config.edid_property); - return ret; -} -EXPORT_SYMBOL(drm_mode_connector_update_edid_property); - -/* Some properties could refer to dynamic refcnt'd objects, or things that - * need special locking to handle lifetime issues (ie. to ensure the prop - * value doesn't become invalid part way through the property update due to - * race). 
The value returned by reference via 'obj' should be passed back - * to drm_property_change_valid_put() after the property is set (and the - * object to which the property is attached has a chance to take it's own - * reference). - */ -bool drm_property_change_valid_get(struct drm_property *property, - uint64_t value, struct drm_mode_object **ref) -{ - int i; - - if (property->flags & DRM_MODE_PROP_IMMUTABLE) - return false; - - *ref = NULL; - - if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) { - if (value < property->values[0] || value > property->values[1]) - return false; - return true; - } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) { - int64_t svalue = U642I64(value); - - if (svalue < U642I64(property->values[0]) || - svalue > U642I64(property->values[1])) - return false; - return true; - } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { - uint64_t valid_mask = 0; - - for (i = 0; i < property->num_values; i++) - valid_mask |= (1ULL << property->values[i]); - return !(value & ~valid_mask); - } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) { - struct drm_property_blob *blob; - - if (value == 0) - return true; - - blob = drm_property_lookup_blob(property->dev, value); - if (blob) { - *ref = &blob->base; - return true; - } else { - return false; - } - } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) { - /* a zero value for an object property translates to null: */ - if (value == 0) - return true; - - *ref = _object_find(property->dev, value, property->values[0]); - return *ref != NULL; - } - - for (i = 0; i < property->num_values; i++) - if (property->values[i] == value) - return true; - return false; -} - -void drm_property_change_valid_put(struct drm_property *property, - struct drm_mode_object *ref) -{ - if (!ref) - return; - - if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) { - drm_mode_object_unreference(ref); - } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) - drm_property_unreference_blob(obj_to_blob(ref)); -} - -/** - * drm_mode_connector_property_set_ioctl - set the current value of a connector property - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function sets the current value for a connectors's property. It also - * calls into a driver's ->set_property callback to update the hardware state - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
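
The legacy connector-property ioctl whose kerneldoc ends above takes a single small struct from userspace; for example, setting a property such as "DPMS" by id (sketch):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int set_connector_property(int fd, uint32_t connector_id,
                                  uint32_t prop_id, uint64_t value)
{
        struct drm_mode_connector_set_property req;

        memset(&req, 0, sizeof(req));
        req.connector_id = connector_id;
        req.prop_id = prop_id;          /* e.g. the id of the "DPMS" property */
        req.value = value;              /* e.g. DRM_MODE_DPMS_OFF */

        return ioctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &req) ? -1 : 0;
}

As the removed implementation shows, this connector-specific path simply forwards to the generic OBJ_SETPROPERTY handling further down, which also covers CRTCs and planes.
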
- */ -int drm_mode_connector_property_set_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv) -{ - struct drm_mode_connector_set_property *conn_set_prop = data; - struct drm_mode_obj_set_property obj_set_prop = { - .value = conn_set_prop->value, - .prop_id = conn_set_prop->prop_id, - .obj_id = conn_set_prop->connector_id, - .obj_type = DRM_MODE_OBJECT_CONNECTOR - }; - - /* It does all the locking and checking we need */ - return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv); -} - -static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, - struct drm_property *property, - uint64_t value) -{ - int ret = -EINVAL; - struct drm_connector *connector = obj_to_connector(obj); - - /* Do DPMS ourselves */ - if (property == connector->dev->mode_config.dpms_property) { - ret = (*connector->funcs->dpms)(connector, (int)value); - } else if (connector->funcs->set_property) - ret = connector->funcs->set_property(connector, property, value); - - /* store the property value if successful */ - if (!ret) - drm_object_property_set_value(&connector->base, property, value); - return ret; -} - -static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, - struct drm_property *property, - uint64_t value) +int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value) { int ret = -EINVAL; struct drm_crtc *crtc = obj_to_crtc(obj); @@ -5035,176 +1716,6 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane, EXPORT_SYMBOL(drm_mode_plane_set_obj_prop); /** - * drm_mode_obj_get_properties_ioctl - get the current value of a object's property - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function retrieves the current value for an object's property. Compared - * to the connector specific ioctl this one is extended to also work on crtc and - * plane objects. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_mode_obj_get_properties *arg = data; - struct drm_mode_object *obj; - int ret = 0; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - drm_modeset_lock_all(dev); - - obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); - if (!obj) { - ret = -ENOENT; - goto out; - } - if (!obj->properties) { - ret = -EINVAL; - goto out_unref; - } - - ret = get_properties(obj, file_priv->atomic, - (uint32_t __user *)(unsigned long)(arg->props_ptr), - (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), - &arg->count_props); - -out_unref: - drm_mode_object_unreference(obj); -out: - drm_modeset_unlock_all(dev); - return ret; -} - -/** - * drm_mode_obj_set_property_ioctl - set the current value of an object's property - * @dev: DRM device - * @data: ioctl data - * @file_priv: DRM file info - * - * This function sets the current value for an object's property. It also calls - * into a driver's ->set_property callback to update the hardware state. - * Compared to the connector specific ioctl this one is extended to also work on - * crtc and plane objects. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. 
- */ -int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_mode_obj_set_property *arg = data; - struct drm_mode_object *arg_obj; - struct drm_mode_object *prop_obj; - struct drm_property *property; - int i, ret = -EINVAL; - struct drm_mode_object *ref; - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - drm_modeset_lock_all(dev); - - arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); - if (!arg_obj) { - ret = -ENOENT; - goto out; - } - if (!arg_obj->properties) - goto out_unref; - - for (i = 0; i < arg_obj->properties->count; i++) - if (arg_obj->properties->properties[i]->base.id == arg->prop_id) - break; - - if (i == arg_obj->properties->count) - goto out_unref; - - prop_obj = drm_mode_object_find(dev, arg->prop_id, - DRM_MODE_OBJECT_PROPERTY); - if (!prop_obj) { - ret = -ENOENT; - goto out_unref; - } - property = obj_to_property(prop_obj); - - if (!drm_property_change_valid_get(property, arg->value, &ref)) - goto out_unref; - - switch (arg_obj->type) { - case DRM_MODE_OBJECT_CONNECTOR: - ret = drm_mode_connector_set_obj_prop(arg_obj, property, - arg->value); - break; - case DRM_MODE_OBJECT_CRTC: - ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value); - break; - case DRM_MODE_OBJECT_PLANE: - ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj), - property, arg->value); - break; - } - - drm_property_change_valid_put(property, ref); - -out_unref: - drm_mode_object_unreference(arg_obj); -out: - drm_modeset_unlock_all(dev); - return ret; -} - -/** - * drm_mode_connector_attach_encoder - attach a connector to an encoder - * @connector: connector to attach - * @encoder: encoder to attach @connector to - * - * This function links up a connector to an encoder. Note that the routing - * restrictions between encoders and crtcs are exposed to userspace through the - * possible_clones and possible_crtcs bitmasks. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int drm_mode_connector_attach_encoder(struct drm_connector *connector, - struct drm_encoder *encoder) -{ - int i; - - /* - * In the past, drivers have attempted to model the static association - * of connector to encoder in simple connector/encoder devices using a - * direct assignment of connector->encoder = encoder. This connection - * is a logical one and the responsibility of the core, so drivers are - * expected not to mess with this. - * - * Note that the error return should've been enough here, but a large - * majority of drivers ignores the return value, so add in a big WARN - * to get people's attention. 
- */ - if (WARN_ON(connector->encoder)) - return -EINVAL; - - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) { - connector->encoder_ids[i] = encoder->base.id; - return 0; - } - } - return -ENOMEM; -} -EXPORT_SYMBOL(drm_mode_connector_attach_encoder); - -/** * drm_mode_crtc_set_gamma_size - set the gamma table size * @crtc: CRTC to set the gamma table size for * @gamma_size: size of the gamma table @@ -5400,14 +1911,23 @@ out: int drm_mode_page_flip_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_mode_crtc_page_flip *page_flip = data; + struct drm_mode_crtc_page_flip_target *page_flip = data; struct drm_crtc *crtc; struct drm_framebuffer *fb = NULL; struct drm_pending_vblank_event *e = NULL; + u32 target_vblank = page_flip->sequence; int ret = -EINVAL; - if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || - page_flip->reserved != 0) + if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS) + return -EINVAL; + + if (page_flip->sequence != 0 && !(page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) + return -EINVAL; + + /* Only one of the DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE/RELATIVE flags + * can be specified + */ + if ((page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) == DRM_MODE_PAGE_FLIP_TARGET) return -EINVAL; if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip) @@ -5417,6 +1937,45 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, if (!crtc) return -ENOENT; + if (crtc->funcs->page_flip_target) { + u32 current_vblank; + int r; + + r = drm_crtc_vblank_get(crtc); + if (r) + return r; + + current_vblank = drm_crtc_vblank_count(crtc); + + switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) { + case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE: + if ((int)(target_vblank - current_vblank) > 1) { + DRM_DEBUG("Invalid absolute flip target %u, " + "must be <= %u\n", target_vblank, + current_vblank + 1); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + break; + case DRM_MODE_PAGE_FLIP_TARGET_RELATIVE: + if (target_vblank != 0 && target_vblank != 1) { + DRM_DEBUG("Invalid relative flip target %u, " + "must be 0 or 1\n", target_vblank); + drm_crtc_vblank_put(crtc); + return -EINVAL; + } + target_vblank += current_vblank; + break; + default: + target_vblank = current_vblank + + !(page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC); + break; + } + } else if (crtc->funcs->page_flip == NULL || + (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET)) { + return -EINVAL; + } + drm_modeset_lock_crtc(crtc, crtc->primary); if (crtc->primary->fb == NULL) { /* The framebuffer is currently unbound, presumably @@ -5427,9 +1986,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; } - if (crtc->funcs->page_flip == NULL) - goto out; - fb = drm_framebuffer_lookup(dev, page_flip->fb_id); if (!fb) { ret = -ENOENT; @@ -5470,7 +2026,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } crtc->primary->old_fb = crtc->primary->fb; - ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); + if (crtc->funcs->page_flip_target) + ret = crtc->funcs->page_flip_target(crtc, fb, e, + page_flip->flags, + target_vblank); + else + ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags); if (ret) { if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) drm_event_cancel_free(dev, &e->base); @@ -5483,6 +2044,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, } out: + if (ret && crtc->funcs->page_flip_target) + drm_crtc_vblank_put(crtc); if (fb) drm_framebuffer_unreference(fb); if (crtc->primary->old_fb) @@ -5646,9 +2209,9 
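To make the page flip changes above concrete: with DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE or _RELATIVE set, the former 'reserved' field carries a target vblank sequence, and the kernel rejects targets more than one vblank ahead of the current count. A hedged userspace sketch, assuming a libdrm whose headers expose the extended struct drm_mode_crtc_page_flip_target and a driver that implements ->page_flip_target():

#include <stdint.h>
#include <xf86drm.h>

static int flip_on_vblank(int fd, uint32_t crtc_id, uint32_t fb_id,
			  uint32_t target_vblank, void *user_data)
{
	struct drm_mode_crtc_page_flip_target flip = {
		.crtc_id = crtc_id,
		.fb_id = fb_id,
		.flags = DRM_MODE_PAGE_FLIP_EVENT |
			 DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE,
		/* absolute target; must be at most current vblank + 1 */
		.sequence = target_vblank,
		.user_data = (uint64_t)(uintptr_t)user_data,
	};

	return drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}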
@@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, * Eg. if the hardware supports everything except DRM_REFLECT_X * one could call this function like this: * - * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) | - * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) | - * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y)); + * drm_rotation_simplify(rotation, DRM_ROTATE_0 | + * DRM_ROTATE_90 | DRM_ROTATE_180 | + * DRM_ROTATE_270 | DRM_REFLECT_Y); * * to eliminate the DRM_ROTATE_X flag. Depending on what kind of * transforms the hardware supports, this function may not @@ -5659,7 +2222,7 @@ unsigned int drm_rotation_simplify(unsigned int rotation, unsigned int supported_rotations) { if (rotation & ~supported_rotations) { - rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y); + rotation ^= DRM_REFLECT_X | DRM_REFLECT_Y; rotation = (rotation & DRM_REFLECT_MASK) | BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4); } @@ -5788,12 +2351,12 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, unsigned int supported_rotations) { static const struct drm_prop_enum_list props[] = { - { DRM_ROTATE_0, "rotate-0" }, - { DRM_ROTATE_90, "rotate-90" }, - { DRM_ROTATE_180, "rotate-180" }, - { DRM_ROTATE_270, "rotate-270" }, - { DRM_REFLECT_X, "reflect-x" }, - { DRM_REFLECT_Y, "reflect-y" }, + { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, + { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, + { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, + { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, + { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, + { __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, }; return drm_property_create_bitmask(dev, 0, "rotation", diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 604d3ef72ffa..5d2cb138eba6 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -75,35 +75,6 @@ */ /** - * drm_helper_move_panel_connectors_to_head() - move panels to the front in the - * connector list - * @dev: drm device to operate on - * - * Some userspace presumes that the first connected connector is the main - * display, where it's supposed to display e.g. the login screen. For - * laptops, this should be the main panel. Use this function to sort all - * (eDP/LVDS) panels to the front of the connector list, instead of - * painstakingly trying to initialize them in the right order. - */ -void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) -{ - struct drm_connector *connector, *tmp; - struct list_head panel_list; - - INIT_LIST_HEAD(&panel_list); - - list_for_each_entry_safe(connector, tmp, - &dev->mode_config.connector_list, head) { - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || - connector->connector_type == DRM_MODE_CONNECTOR_eDP) - list_move_tail(&connector->head, &panel_list); - } - - list_splice(&panel_list, &dev->mode_config.connector_list); -} -EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); - -/** * drm_helper_encoder_in_use - check if a given encoder is in use * @encoder: encoder to check * @@ -913,33 +884,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode) EXPORT_SYMBOL(drm_helper_connector_dpms); /** - * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata - * @fb: drm_framebuffer object to fill out - * @mode_cmd: metadata from the userspace fb creation request - * - * This helper can be used in a drivers fb_create callback to pre-fill the fb's - * metadata fields. 
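A short driver-side sketch of drm_rotation_simplify() with the flag-valued DRM_ROTATE_*/DRM_REFLECT_* constants used above (no BIT() wrapping any more); foo_plane_atomic_check() and the supported mask are hypothetical:

#include <drm/drm_crtc.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	const unsigned int supported = DRM_ROTATE_0 | DRM_ROTATE_180 |
				       DRM_REFLECT_X | DRM_REFLECT_Y;
	unsigned int rotation;

	/* Try to express the request in terms the hardware understands. */
	rotation = drm_rotation_simplify(state->rotation, supported);
	if (rotation & ~supported)
		return -EINVAL;

	/* program the hardware from 'rotation', not state->rotation */
	return 0;
}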
- */ -void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - int i; - - fb->width = mode_cmd->width; - fb->height = mode_cmd->height; - for (i = 0; i < 4; i++) { - fb->pitches[i] = mode_cmd->pitches[i]; - fb->offsets[i] = mode_cmd->offsets[i]; - fb->modifier[i] = mode_cmd->modifier[i]; - } - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, - &fb->bits_per_pixel); - fb->pixel_format = mode_cmd->pixel_format; - fb->flags = mode_cmd->flags; -} -EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); - -/** * drm_helper_resume_force_mode - force-restore mode setting configuration * @dev: drm_device which should be restored * diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h new file mode 100644 index 000000000000..4e6b57ae7188 --- /dev/null +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -0,0 +1,58 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * This header file contains mode setting related functions and definitions + * which are only used within the drm kms helper module as internal + * implementation details and are not exported to drivers. 
+ */ + +#include <drm/drm_dp_helper.h> + +/* drm_fb_helper.c */ +int drm_fb_helper_modinit(void); + +/* drm_dp_aux_dev.c */ +#ifdef CONFIG_DRM_DP_AUX_CHARDEV +int drm_dp_aux_dev_init(void); +void drm_dp_aux_dev_exit(void); +int drm_dp_aux_register_devnode(struct drm_dp_aux *aux); +void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux); +#else +static inline int drm_dp_aux_dev_init(void) +{ + return 0; +} + +static inline void drm_dp_aux_dev_exit(void) +{ +} + +static inline int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) +{ + return 0; +} + +static inline void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) +{ +} +#endif diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 0c34e6d906d1..a3622644bccf 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -33,18 +33,9 @@ /* drm_crtc.c */ -void drm_connector_ida_init(void); -void drm_connector_ida_destroy(void); -int drm_mode_object_get(struct drm_device *dev, - struct drm_mode_object *obj, uint32_t obj_type); -void drm_mode_object_unregister(struct drm_device *dev, - struct drm_mode_object *object); -bool drm_property_change_valid_get(struct drm_property *property, - uint64_t value, - struct drm_mode_object **ref); -void drm_property_change_valid_put(struct drm_property *property, - struct drm_mode_object *ref); - +int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value); int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format); int drm_crtc_check_viewport(const struct drm_crtc *crtc, @@ -53,8 +44,6 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc, const struct drm_framebuffer *fb); void drm_fb_release(struct drm_file *file_priv); -void drm_property_destroy_user_blobs(struct drm_device *dev, - struct drm_file *file_priv); /* dumb buffer support IOCTLs */ int drm_mode_create_dumb_ioctl(struct drm_device *dev, @@ -64,32 +53,13 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -/* framebuffer IOCTLs */ -extern int drm_mode_addfb(struct drm_device *dev, - void *data, struct drm_file *file_priv); -extern int drm_mode_addfb2(struct drm_device *dev, - void *data, struct drm_file *file_priv); -int drm_mode_rmfb(struct drm_device *dev, - void *data, struct drm_file *file_priv); -int drm_mode_getfb(struct drm_device *dev, - void *data, struct drm_file *file_priv); -int drm_mode_dirtyfb_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); - /* IOCTLs */ -int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_mode_getconnector(struct drm_device *dev, - void *data, struct drm_file *file_priv); int drm_mode_setcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getplane(struct drm_device *dev, @@ -100,6 +70,24 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_cursor2_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int 
drm_mode_gamma_get_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_gamma_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +int drm_mode_page_flip_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_property.c */ +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); +bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, + struct drm_mode_object **ref); +void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref); + +/* IOCTL */ int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_getblob_ioctl(struct drm_device *dev, @@ -108,17 +96,75 @@ int drm_mode_createblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mode_destroyblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_mode_connector_property_set_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); + +/* drm_mode_object.c */ +int drm_mode_object_get_reg(struct drm_device *dev, + struct drm_mode_object *obj, + uint32_t obj_type, + bool register_obj, + void (*obj_free_cb)(struct kref *kref)); +void drm_mode_object_register(struct drm_device *dev, + struct drm_mode_object *obj); +int drm_mode_object_get(struct drm_device *dev, + struct drm_mode_object *obj, uint32_t obj_type); +struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, + uint32_t id, uint32_t type); +void drm_mode_object_unregister(struct drm_device *dev, + struct drm_mode_object *object); +int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + uint32_t __user *prop_ptr, + uint64_t __user *prop_values, + uint32_t *arg_count_props); + +/* IOCTL */ + +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +/* drm_encoder.c */ +int drm_encoder_register_all(struct drm_device *dev); +void drm_encoder_unregister_all(struct drm_device *dev); + +/* IOCTL */ int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_mode_gamma_get_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); -int drm_mode_gamma_set_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); -int drm_mode_page_flip_ioctl(struct drm_device *dev, - void *data, struct drm_file *file_priv); +/* drm_connector.c */ +void drm_connector_ida_init(void); +void drm_connector_ida_destroy(void); +void drm_connector_unregister_all(struct drm_device *dev); +int drm_connector_register_all(struct drm_device *dev); +int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t value); +int drm_connector_create_standard_properties(struct drm_device *dev); + +/* IOCTL */ +int drm_mode_connector_property_set_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getconnector(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +/* drm_framebuffer.c */ +struct drm_framebuffer * +drm_internal_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); +void drm_framebuffer_free(struct kref *kref); + +/* IOCTL */ +int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file 
*file_priv); +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); /* drm_atomic.c */ int drm_atomic_get_property(struct drm_mode_object *obj, @@ -130,5 +176,5 @@ int drm_modeset_register_all(struct drm_device *dev); void drm_modeset_unregister_all(struct drm_device *dev); /* drm_blend.c */ -int drm_atomic_helper_normalize_zpos(struct drm_device *dev, - struct drm_atomic_state *state); +int drm_atomic_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index ea481800ef56..3f83e2ca80ad 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -50,9 +50,8 @@ int drm_legacy_dma_setup(struct drm_device *dev) int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - drm_core_check_feature(dev, DRIVER_MODESET)) { + !drm_core_check_feature(dev, DRIVER_LEGACY)) return 0; - } dev->buf_use = 0; atomic_set(&dev->buf_alloc, 0); @@ -81,9 +80,8 @@ void drm_legacy_dma_takedown(struct drm_device *dev) int i, j; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || - drm_core_check_feature(dev, DRIVER_MODESET)) { + !drm_core_check_feature(dev, DRIVER_LEGACY)) return; - } if (!dma) return; diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index 734f86a345f6..ec1ed94b2390 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -36,6 +36,8 @@ #include <drm/drm_crtc.h> #include <drm/drmP.h> +#include "drm_crtc_helper_internal.h" + struct drm_dp_aux_dev { unsigned index; struct drm_dp_aux *aux; @@ -283,12 +285,7 @@ static int auxdev_wait_atomic_t(atomic_t *p) schedule(); return 0; } -/** - * drm_dp_aux_unregister_devnode() - unregister a devnode for this aux channel - * @aux: DisplayPort AUX channel - * - * Returns 0 on success or a negative error code on failure. - */ + void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) { struct drm_dp_aux_dev *aux_dev; @@ -314,14 +311,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) DRM_DEBUG("drm_dp_aux_dev: aux [%s] unregistering\n", aux->name); kref_put(&aux_dev->refcount, release_drm_dp_aux_dev); } -EXPORT_SYMBOL(drm_dp_aux_unregister_devnode); -/** - * drm_dp_aux_register_devnode() - register a devnode for this aux channel - * @aux: DisplayPort AUX channel - * - * Returns 0 on success or a negative error code on failure. 
- */ int drm_dp_aux_register_devnode(struct drm_dp_aux *aux) { struct drm_dp_aux_dev *aux_dev; @@ -347,7 +337,6 @@ error: drm_dp_aux_unregister_devnode(aux); return res; } -EXPORT_SYMBOL(drm_dp_aux_register_devnode); int drm_dp_aux_dev_init(void) { @@ -369,11 +358,9 @@ out: class_destroy(drm_dp_aux_dev_class); return res; } -EXPORT_SYMBOL(drm_dp_aux_dev_init); void drm_dp_aux_dev_exit(void) { unregister_chrdev(drm_dev_major, "aux"); class_destroy(drm_dp_aux_dev_class); } -EXPORT_SYMBOL(drm_dp_aux_dev_exit); diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index eae5ef963cb7..031c4d335b08 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -28,9 +28,10 @@ #include <linux/sched.h> #include <linux/i2c.h> #include <drm/drm_dp_helper.h> -#include <drm/drm_dp_aux_dev.h> #include <drm/drmP.h> +#include "drm_crtc_helper_internal.h" + /** * DOC: dp helpers * @@ -223,7 +224,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, err = ret; } - DRM_DEBUG_KMS("too many retries, giving up\n"); + DRM_DEBUG_KMS("Too many retries, giving up. First error: %d\n", err); ret = err; unlock: @@ -574,7 +575,17 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (ret == -EBUSY) continue; - DRM_DEBUG_KMS("transaction failed: %d\n", ret); + /* + * While timeouts can be errors, they're usually normal + * behavior (for instance, when a driver tries to + * communicate with a non-existant DisplayPort device). + * Avoid spamming the kernel log with timeout errors. + */ + if (ret == -ETIMEDOUT) + DRM_DEBUG_KMS_RATELIMITED("transaction timed out\n"); + else + DRM_DEBUG_KMS("transaction failed: %d\n", ret); + return ret; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index be27ed36f56e..acf6a5f38920 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -63,37 +63,51 @@ static struct idr drm_minors_idr; static struct dentry *drm_debugfs_root; -void drm_err(const char *format, ...) +#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV" + +void drm_dev_printk(const struct device *dev, const char *level, + unsigned int category, const char *function_name, + const char *prefix, const char *format, ...) { struct va_format vaf; va_list args; - va_start(args, format); + if (category != DRM_UT_NONE && !(drm_debug & category)) + return; + va_start(args, format); vaf.fmt = format; vaf.va = &args; - printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV", - __builtin_return_address(0), &vaf); + if (dev) + dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix, + &vaf); + else + printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf); va_end(args); } -EXPORT_SYMBOL(drm_err); +EXPORT_SYMBOL(drm_dev_printk); -void drm_ut_debug_printk(const char *function_name, const char *format, ...) +void drm_printk(const char *level, unsigned int category, + const char *function_name, const char *prefix, + const char *format, ...) 
{ struct va_format vaf; va_list args; + if (category != DRM_UT_NONE && !(drm_debug & category)) + return; + va_start(args, format); vaf.fmt = format; vaf.va = &args; - printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf); + printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf); va_end(args); } -EXPORT_SYMBOL(drm_ut_debug_printk); +EXPORT_SYMBOL(drm_printk); /* * DRM Minors @@ -112,7 +126,7 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev, unsigned int type) { switch (type) { - case DRM_MINOR_LEGACY: + case DRM_MINOR_PRIMARY: return &dev->primary; case DRM_MINOR_RENDER: return &dev->render; @@ -512,7 +526,7 @@ int drm_dev_init(struct drm_device *dev, goto err_minors; } - ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY); + ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY); if (ret) goto err_minors; @@ -545,7 +559,7 @@ err_ctxbitmap: drm_legacy_ctxbitmap_cleanup(dev); drm_ht_remove(&dev->map_hash); err_minors: - drm_minor_free(dev, DRM_MINOR_LEGACY); + drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); drm_minor_free(dev, DRM_MINOR_CONTROL); drm_fs_inode_free(dev->anon_inode); @@ -608,7 +622,7 @@ static void drm_dev_release(struct kref *ref) drm_ht_remove(&dev->map_hash); drm_fs_inode_free(dev->anon_inode); - drm_minor_free(dev, DRM_MINOR_LEGACY); + drm_minor_free(dev, DRM_MINOR_PRIMARY); drm_minor_free(dev, DRM_MINOR_RENDER); drm_minor_free(dev, DRM_MINOR_CONTROL); @@ -684,7 +698,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; - ret = drm_minor_register(dev, DRM_MINOR_LEGACY); + ret = drm_minor_register(dev, DRM_MINOR_PRIMARY); if (ret) goto err_minors; @@ -701,7 +715,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto out_unlock; err_minors: - drm_minor_unregister(dev, DRM_MINOR_LEGACY); + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); drm_minor_unregister(dev, DRM_MINOR_RENDER); drm_minor_unregister(dev, DRM_MINOR_CONTROL); out_unlock: @@ -741,7 +755,7 @@ void drm_dev_unregister(struct drm_device *dev) list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) drm_legacy_rmmap(dev, r_list->map); - drm_minor_unregister(dev, DRM_MINOR_LEGACY); + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); drm_minor_unregister(dev, DRM_MINOR_RENDER); drm_minor_unregister(dev, DRM_MINOR_CONTROL); } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 7df26d4b7ad8..50541324a4ab 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -74,6 +74,8 @@ #define EDID_QUIRK_FORCE_8BPC (1 << 8) /* Force 12bpc */ #define EDID_QUIRK_FORCE_12BPC (1 << 9) +/* Force 6bpc */ +#define EDID_QUIRK_FORCE_6BPC (1 << 10) struct detailed_mode_closure { struct drm_connector *connector; @@ -100,6 +102,9 @@ static struct edid_quirk { /* Unknown Acer */ { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, + /* Belinea 10 15 55 */ { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, @@ -986,7 +991,7 @@ static const struct drm_display_mode edid_cea_modes[] = { .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, /* 64 - 1920x1080@100Hz */ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, - 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, }; @@ 
-3716,14 +3721,7 @@ bool drm_rgb_quant_range_selectable(struct edid *edid) } EXPORT_SYMBOL(drm_rgb_quant_range_selectable); -/** - * drm_assign_hdmi_deep_color_info - detect whether monitor supports - * hdmi deep color modes and update drm_display_info if so. - * @edid: monitor EDID information - * @info: Updated with maximum supported deep color bpc and color format - * if deep color supported. - * @connector: DRM connector, used only for debug output - * +/* * Parse the CEA extension according to CEA-861-B. * Return true if HDMI deep color supported, false if not or unknown. */ @@ -3817,16 +3815,6 @@ static bool drm_assign_hdmi_deep_color_info(struct edid *edid, return false; } -/** - * drm_add_display_info - pull display info out if present - * @edid: EDID data - * @info: display info (attached to connector) - * @connector: connector whose edid is used to build display info - * - * Grab any available display info and stuff it into the drm_display_info - * structure that's part of the connector. Useful for tracking bpp and - * color spaces. - */ static void drm_add_display_info(struct edid *edid, struct drm_display_info *info, struct drm_connector *connector) @@ -3862,6 +3850,20 @@ static void drm_add_display_info(struct edid *edid, /* HDMI deep color modes supported? Assign to info, if so */ drm_assign_hdmi_deep_color_info(edid, info, connector); + /* + * Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3? + * + * For such displays, the DFP spec 1.0, section 3.10 "EDID support" + * tells us to assume 8 bpc color depth if the EDID doesn't have + * extensions which tell otherwise. + */ + if ((info->bpc == 0) && (edid->revision < 4) && + (edid->input & DRM_EDID_DIGITAL_TYPE_DVI)) { + info->bpc = 8; + DRM_DEBUG("%s: Assigning DFP sink color depth as %d bpc.\n", + connector->name, info->bpc); + } + /* Only defined for 1.4 with digital displays */ if (edid->revision < 4) return; @@ -4033,7 +4035,9 @@ static int add_displayid_detailed_modes(struct drm_connector *connector, * @connector: connector we're probing * @edid: EDID data * - * Add the specified modes to the connector's mode list. + * Add the specified modes to the connector's mode list. Also fills out the + * &drm_display_info structure in @connector with any information which can be + * derived from the edid. * * Return: The number of modes added or 0 if we couldn't find any. */ @@ -4082,6 +4086,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) drm_add_display_info(edid, &connector->display_info, connector); + if (quirks & EDID_QUIRK_FORCE_6BPC) + connector->display_info.bpc = 6; + if (quirks & EDID_QUIRK_FORCE_8BPC) connector->display_info.bpc = 8; diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c new file mode 100644 index 000000000000..998a6743a586 --- /dev/null +++ b/drivers/gpu/drm/drm_encoder.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. 
It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include <drm/drmP.h> +#include <drm/drm_encoder.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * Encoders represent the connecting element between the CRTC (as the overall + * pixel pipeline, represented by struct &drm_crtc) and the connectors (as the + * generic sink entity, represented by struct &drm_connector). Encoders are + * objects exposed to userspace, originally to allow userspace to infer cloning + * and connector/CRTC restrictions. Unfortunately almost all drivers get this + * wrong, making the uabi pretty much useless. On top of that the exposed + * restrictions are too simple for todays hardware, and the recommend way to + * infer restrictions is by using the DRM_MODE_ATOMIC_TEST_ONLY flag for the + * atomic IOCTL. + * + * Otherwise encoders aren't used in the uapi at all (any modeset request from + * userspace directly connects a connector with a CRTC), drivers are therefore + * free to use them however they wish. Modeset helper libraries make strong use + * of encoders to facilitate code sharing. But for more complex settings it is + * usually better to move shared code into a separate &drm_bridge. Compared to + * encoders bridges also have the benefit of not being purely an internal + * abstraction since they are not exposed to userspace at all. + * + * Encoders are initialized with drm_encoder_init() and cleaned up using + * drm_encoder_cleanup(). + */ +static const struct drm_prop_enum_list drm_encoder_enum_list[] = { + { DRM_MODE_ENCODER_NONE, "None" }, + { DRM_MODE_ENCODER_DAC, "DAC" }, + { DRM_MODE_ENCODER_TMDS, "TMDS" }, + { DRM_MODE_ENCODER_LVDS, "LVDS" }, + { DRM_MODE_ENCODER_TVDAC, "TV" }, + { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, + { DRM_MODE_ENCODER_DSI, "DSI" }, + { DRM_MODE_ENCODER_DPMST, "DP MST" }, + { DRM_MODE_ENCODER_DPI, "DPI" }, +}; + +int drm_encoder_register_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->late_register) + ret = encoder->funcs->late_register(encoder); + if (ret) + return ret; + } + + return 0; +} + +void drm_encoder_unregister_all(struct drm_device *dev) +{ + struct drm_encoder *encoder; + + drm_for_each_encoder(encoder, dev) { + if (encoder->funcs->early_unregister) + encoder->funcs->early_unregister(encoder); + } +} + +/** + * drm_encoder_init - Init a preallocated encoder + * @dev: drm device + * @encoder: the encoder to init + * @funcs: callbacks for this encoder + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * Initialises a preallocated encoder. Encoder should be subclassed as part of + * driver encoder objects. At driver unload time drm_encoder_cleanup() should be + * called from the driver's destroy hook in &drm_encoder_funcs. + * + * Returns: + * Zero on success, error code on failure. 
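As a usage sketch for the kerneldoc above (the foo_* names are hypothetical): a driver embeds struct drm_encoder, initializes it with drm_encoder_init(), and frees it from the ->destroy callback after drm_encoder_cleanup():

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>

struct foo_encoder {
	struct drm_encoder base;
	/* driver private state would follow */
};

static void foo_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(container_of(encoder, struct foo_encoder, base));
}

static const struct drm_encoder_funcs foo_encoder_funcs = {
	.destroy = foo_encoder_destroy,
};

static int foo_create_encoder(struct drm_device *dev)
{
	struct foo_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);
	int ret;

	if (!enc)
		return -ENOMEM;

	/* name is printf-style; passing NULL would pick "TMDS-<id>" */
	ret = drm_encoder_init(dev, &enc->base, &foo_encoder_funcs,
			       DRM_MODE_ENCODER_TMDS, "foo-tmds");
	if (ret)
		kfree(enc);

	return ret;
}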
+ */ +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...) +{ + int ret; + + drm_modeset_lock_all(dev); + + ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); + if (ret) + goto out_unlock; + + encoder->dev = dev; + encoder->encoder_type = encoder_type; + encoder->funcs = funcs; + if (name) { + va_list ap; + + va_start(ap, name); + encoder->name = kvasprintf(GFP_KERNEL, name, ap); + va_end(ap); + } else { + encoder->name = kasprintf(GFP_KERNEL, "%s-%d", + drm_encoder_enum_list[encoder_type].name, + encoder->base.id); + } + if (!encoder->name) { + ret = -ENOMEM; + goto out_put; + } + + list_add_tail(&encoder->head, &dev->mode_config.encoder_list); + encoder->index = dev->mode_config.num_encoder++; + +out_put: + if (ret) + drm_mode_object_unregister(dev, &encoder->base); + +out_unlock: + drm_modeset_unlock_all(dev); + + return ret; +} +EXPORT_SYMBOL(drm_encoder_init); + +/** + * drm_encoder_cleanup - cleans up an initialised encoder + * @encoder: encoder to cleanup + * + * Cleans up the encoder but doesn't free the object. + */ +void drm_encoder_cleanup(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + + /* Note that the encoder_list is considered to be static; should we + * remove the drm_encoder at runtime we would have to decrement all + * the indices on the drm_encoder after us in the encoder_list. + */ + + drm_modeset_lock_all(dev); + drm_mode_object_unregister(dev, &encoder->base); + kfree(encoder->name); + list_del(&encoder->head); + dev->mode_config.num_encoder--; + drm_modeset_unlock_all(dev); + + memset(encoder, 0, sizeof(*encoder)); +} +EXPORT_SYMBOL(drm_encoder_cleanup); + +static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) +{ + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + bool uses_atomic = false; + + /* For atomic drivers only state objects are synchronously updated and + * protected by modeset locks, so check those first. */ + drm_for_each_connector(connector, dev) { + if (!connector->state) + continue; + + uses_atomic = true; + + if (connector->state->best_encoder != encoder) + continue; + + return connector->state->crtc; + } + + /* Don't return stale data (e.g. pending async disable). 
*/ + if (uses_atomic) + return NULL; + + return encoder->crtc; +} + +int drm_mode_getencoder(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_get_encoder *enc_resp = data; + struct drm_encoder *encoder; + struct drm_crtc *crtc; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + encoder = drm_encoder_find(dev, enc_resp->encoder_id); + if (!encoder) + return -ENOENT; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + crtc = drm_encoder_get_crtc(encoder); + if (crtc) + enc_resp->crtc_id = crtc->base.id; + else + enc_resp->crtc_id = 0; + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + enc_resp->encoder_type = encoder->encoder_type; + enc_resp->encoder_id = encoder->base.id; + enc_resp->possible_crtcs = encoder->possible_crtcs; + enc_resp->possible_clones = encoder->possible_clones; + + return 0; +} diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index ce54e985d91b..c4d350086def 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -29,10 +29,10 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/console.h> #include <linux/kernel.h> #include <linux/sysrq.h> #include <linux/slab.h> -#include <linux/fb.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> @@ -335,7 +335,7 @@ retry: goto fail; } - plane_state->rotation = BIT(DRM_ROTATE_0); + plane_state->rotation = DRM_ROTATE_0; plane->old_fb = plane->fb; plane_mask |= 1 << drm_plane_index(plane); @@ -395,7 +395,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) if (dev->mode_config.rotation_property) { drm_mode_plane_set_obj_prop(plane, dev->mode_config.rotation_property, - BIT(DRM_ROTATE_0)); + DRM_ROTATE_0); } } @@ -618,6 +618,16 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) kfree(helper->crtc_info); } +static void drm_fb_helper_resume_worker(struct work_struct *work) +{ + struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, + resume_work); + + console_lock(); + fb_set_suspend(helper->fbdev, 0); + console_unlock(); +} + static void drm_fb_helper_dirty_work(struct work_struct *work) { struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, @@ -649,6 +659,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, { INIT_LIST_HEAD(&helper->kernel_fb_list); spin_lock_init(&helper->dirty_lock); + INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work); helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0; helper->funcs = funcs; @@ -1024,17 +1035,65 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); /** * drm_fb_helper_set_suspend - wrapper around fb_set_suspend * @fb_helper: driver-allocated fbdev helper - * @state: desired state, zero to resume, non-zero to suspend + * @suspend: whether to suspend or resume * - * A wrapper around fb_set_suspend implemented by fbdev core + * A wrapper around fb_set_suspend implemented by fbdev core. 
+ * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take + * the lock yourself */ -void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state) +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) { if (fb_helper && fb_helper->fbdev) - fb_set_suspend(fb_helper->fbdev, state); + fb_set_suspend(fb_helper->fbdev, suspend); } EXPORT_SYMBOL(drm_fb_helper_set_suspend); +/** + * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also + * takes the console lock + * @fb_helper: driver-allocated fbdev helper + * @suspend: whether to suspend or resume + * + * A wrapper around fb_set_suspend() that takes the console lock. If the lock + * isn't available on resume, a worker is tasked with waiting for the lock + * to become available. The console lock can be pretty contented on resume + * due to all the printk activity. + * + * This function can be called multiple times with the same state since + * &fb_info->state is checked to see if fbdev is running or not before locking. + * + * Use drm_fb_helper_set_suspend() if you need to take the lock yourself. + */ +void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, + bool suspend) +{ + if (!fb_helper || !fb_helper->fbdev) + return; + + /* make sure there's no pending/ongoing resume */ + flush_work(&fb_helper->resume_work); + + if (suspend) { + if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING) + return; + + console_lock(); + + } else { + if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING) + return; + + if (!console_trylock()) { + schedule_work(&fb_helper->resume_work); + return; + } + } + + fb_set_suspend(fb_helper->fbdev, suspend); + console_unlock(); +} +EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); + static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { @@ -2194,7 +2253,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config); * @fb_helper: the drm_fb_helper * * Scan the connectors attached to the fb_helper and try to put together a - * setup after *notification of a change in output configuration. + * setup after notification of a change in output configuration. * * Called at runtime, takes the mode config locks to be able to check/change the * modeset configuration. 
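A hedged sketch of how a driver's PM hooks would use the drm_fb_helper_set_suspend_unlocked() helper added above (struct foo_device and the callbacks are hypothetical); the helper takes the console lock itself and, on a contended resume, defers fb_set_suspend() to the resume worker:

#include <linux/device.h>
#include <drm/drm_fb_helper.h>

struct foo_device {
	struct drm_device *drm;
	struct drm_fb_helper fb_helper;
};

static int foo_pm_suspend(struct device *dev)
{
	struct foo_device *fdev = dev_get_drvdata(dev);

	/* park fbdev before powering down; console lock handled inside */
	drm_fb_helper_set_suspend_unlocked(&fdev->fb_helper, true);
	/* ... disable outputs and power down hardware ... */
	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	struct foo_device *fdev = dev_get_drvdata(dev);

	/* ... power up hardware and restore the mode ... */

	/* does not block if the console lock is busy; work is scheduled */
	drm_fb_helper_set_suspend_unlocked(&fdev->fb_helper, false);
	return 0;
}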
Must be run from process context (which usually means diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 323c238fcac7..036cd275c53e 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -92,7 +92,7 @@ static int drm_setup(struct drm_device * dev) int ret; if (dev->driver->firstopen && - !drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_core_check_feature(dev, DRIVER_LEGACY)) { ret = dev->driver->firstopen(dev); if (ret != 0) return ret; @@ -346,7 +346,7 @@ void drm_lastclose(struct drm_device * dev) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_dev_reinit(dev); } @@ -389,7 +389,7 @@ int drm_release(struct inode *inode, struct file *filp) (long)old_encode_dev(file_priv->minor->kdev->devt), dev->open_count); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) drm_legacy_lock_release(dev, filp); if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 0645c85d5f95..29c56b4331e0 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -36,19 +36,60 @@ static char printable_char(int c) } /** + * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description + * @bpp: bits per pixels + * @depth: bit depth per pixel + * + * Computes a drm fourcc pixel format code for the given @bpp/@depth values. + * Useful in fbdev emulation code, since that deals in those values. + */ +uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) +{ + uint32_t fmt; + + switch (bpp) { + case 8: + fmt = DRM_FORMAT_C8; + break; + case 16: + if (depth == 15) + fmt = DRM_FORMAT_XRGB1555; + else + fmt = DRM_FORMAT_RGB565; + break; + case 24: + fmt = DRM_FORMAT_RGB888; + break; + case 32: + if (depth == 24) + fmt = DRM_FORMAT_XRGB8888; + else if (depth == 30) + fmt = DRM_FORMAT_XRGB2101010; + else + fmt = DRM_FORMAT_ARGB8888; + break; + default: + DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n"); + fmt = DRM_FORMAT_XRGB8888; + break; + } + + return fmt; +} +EXPORT_SYMBOL(drm_mode_legacy_fb_format); + +/** * drm_get_format_name - return a string for drm fourcc format * @format: format to compute name of * - * Note that the buffer used by this function is globally shared and owned by - * the function itself. - * - * FIXME: This isn't really multithreading safe. + * Note that the buffer returned by this function is owned by the caller + * and will need to be freed using kfree(). 
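Referring back to the drm_mode_legacy_fb_format() helper added in drm_fourcc.c above, a hedged fragment of a hypothetical fbdev ->fb_probe() implementation that converts the legacy bpp/depth pair from the fbdev helper into a fourcc (its declaration is assumed to be reachable via drm_crtc.h/drm_fourcc.h):

#include <linux/kernel.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>

static void foo_fill_mode_cmd(struct drm_mode_fb_cmd2 *mode_cmd,
			      const struct drm_fb_helper_surface_size *sizes)
{
	mode_cmd->width = sizes->surface_width;
	mode_cmd->height = sizes->surface_height;
	mode_cmd->pitches[0] = mode_cmd->width *
			       DIV_ROUND_UP(sizes->surface_bpp, 8);
	/* e.g. bpp=32, depth=24 -> DRM_FORMAT_XRGB8888 */
	mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							   sizes->surface_depth);
}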
*/ -const char *drm_get_format_name(uint32_t format) +char *drm_get_format_name(uint32_t format) { - static char buf[32]; + char *buf = kmalloc(32, GFP_KERNEL); - snprintf(buf, sizeof(buf), + snprintf(buf, 32, "%c%c%c%c %s-endian (0x%08x)", printable_char(format & 0xff), printable_char((format >> 8) & 0xff), @@ -73,6 +114,8 @@ EXPORT_SYMBOL(drm_get_format_name); void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp) { + char *format_name; + switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: @@ -127,8 +170,9 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, *bpp = 32; break; default: - DRM_DEBUG_KMS("unsupported pixel format %s\n", - drm_get_format_name(format)); + format_name = drm_get_format_name(format); + DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name); + kfree(format_name); *depth = 0; *bpp = 0; break; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c new file mode 100644 index 000000000000..30dc01e6eb5d --- /dev/null +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -0,0 +1,831 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include <drm/drmP.h> +#include <drm/drm_auth.h> +#include <drm/drm_framebuffer.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * Frame buffers are abstract memory objects that provide a source of pixels to + * scanout to a CRTC. Applications explicitly request the creation of frame + * buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and receive an opaque + * handle that can be passed to the KMS CRTC control, plane configuration and + * page flip functions. + * + * Frame buffers rely on the underlying memory manager for allocating backing + * storage. When creating a frame buffer applications pass a memory handle + * (or a list of memory handles for multi-planar formats) through the + * struct &drm_mode_fb_cmd2 argument. For drivers using GEM as their userspace + * buffer management interface this would be a GEM handle. Drivers are however + * free to use their own backing storage object handles, e.g. vmwgfx directly + * exposes special TTM handles to userspace and so expects TTM handles in the + * create ioctl and not GEM handles. + * + * Framebuffers are tracked with struct &drm_framebuffer. 
They are published + * using drm_framebuffer_init() - after calling that function userspace can use + * and access the framebuffer object. The helper function + * drm_helper_mode_fill_fb_struct() can be used to pre-fill the required + * metadata fields. + * + * The lifetime of a drm framebuffer is controlled with a reference count, + * drivers can grab additional references with drm_framebuffer_reference() and + * drop them again with drm_framebuffer_unreference(). For driver-private + * framebuffers for which the last reference is never dropped (e.g. for the + * fbdev framebuffer when the struct struct &drm_framebuffer is embedded into + * the fbdev helper struct) drivers can manually clean up a framebuffer at + * module unload time with drm_framebuffer_unregister_private(). But doing this + * is not recommended, and it's better to have a normal free-standing struct + * &drm_framebuffer. + */ + +/** + * drm_mode_addfb - add an FB to the graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Add a new FB to the specified CRTC, given a user request. This is the + * original addfb ioctl which only supported RGB formats. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd *or = data; + struct drm_mode_fb_cmd2 r = {}; + int ret; + + /* convert to new format and call new ioctl */ + r.fb_id = or->fb_id; + r.width = or->width; + r.height = or->height; + r.pitches[0] = or->pitch; + r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); + r.handles[0] = or->handle; + + ret = drm_mode_addfb2(dev, &r, file_priv); + if (ret) + return ret; + + or->fb_id = r.fb_id; + + return 0; +} + +static int format_check(const struct drm_mode_fb_cmd2 *r) +{ + uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN; + char *format_name; + + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_AYUV: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: 
+ case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 0; + default: + format_name = drm_get_format_name(r->pixel_format); + DRM_DEBUG_KMS("invalid pixel format %s\n", format_name); + kfree(format_name); + return -EINVAL; + } +} + +static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) +{ + int ret, hsub, vsub, num_planes, i; + + ret = format_check(r); + if (ret) { + char *format_name = drm_get_format_name(r->pixel_format); + DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name); + kfree(format_name); + return ret; + } + + hsub = drm_format_horz_chroma_subsampling(r->pixel_format); + vsub = drm_format_vert_chroma_subsampling(r->pixel_format); + num_planes = drm_format_num_planes(r->pixel_format); + + if (r->width == 0 || r->width % hsub) { + DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); + return -EINVAL; + } + + if (r->height == 0 || r->height % vsub) { + DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); + return -EINVAL; + } + + for (i = 0; i < num_planes; i++) { + unsigned int width = r->width / (i != 0 ? hsub : 1); + unsigned int height = r->height / (i != 0 ? vsub : 1); + unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); + + if (!r->handles[i]) { + DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); + return -EINVAL; + } + + if ((uint64_t) width * cpp > UINT_MAX) + return -ERANGE; + + if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) + return -ERANGE; + + if (r->pitches[i] < width * cpp) { + DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); + return -EINVAL; + } + + if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { + DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", + r->modifier[i], i); + return -EINVAL; + } + + /* modifier specific checks: */ + switch (r->modifier[i]) { + case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: + /* NOTE: the pitch restriction may be lifted later if it turns + * out that no hw has this restriction: + */ + if (r->pixel_format != DRM_FORMAT_NV12 || + width % 128 || height % 32 || + r->pitches[i] % 128) { + DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); + return -EINVAL; + } + break; + + default: + break; + } + } + + for (i = num_planes; i < 4; i++) { + if (r->modifier[i]) { + DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i); + return -EINVAL; + } + + /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. 
*/ + if (!(r->flags & DRM_MODE_FB_MODIFIERS)) + continue; + + if (r->handles[i]) { + DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i); + return -EINVAL; + } + + if (r->pitches[i]) { + DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i); + return -EINVAL; + } + + if (r->offsets[i]) { + DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i); + return -EINVAL; + } + } + + return 0; +} + +struct drm_framebuffer * +drm_internal_framebuffer_create(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_framebuffer *fb; + int ret; + + if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { + DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); + return ERR_PTR(-EINVAL); + } + + if ((config->min_width > r->width) || (r->width > config->max_width)) { + DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", + r->width, config->min_width, config->max_width); + return ERR_PTR(-EINVAL); + } + if ((config->min_height > r->height) || (r->height > config->max_height)) { + DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", + r->height, config->min_height, config->max_height); + return ERR_PTR(-EINVAL); + } + + if (r->flags & DRM_MODE_FB_MODIFIERS && + !dev->mode_config.allow_fb_modifiers) { + DRM_DEBUG_KMS("driver does not support fb modifiers\n"); + return ERR_PTR(-EINVAL); + } + + ret = framebuffer_check(r); + if (ret) + return ERR_PTR(ret); + + fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); + if (IS_ERR(fb)) { + DRM_DEBUG_KMS("could not create framebuffer\n"); + return fb; + } + + return fb; +} + +/** + * drm_mode_addfb2 - add an FB to the graphics configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Add a new FB to the specified CRTC, given a user request with format. This is + * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers + * and uses fourcc codes as pixel format specifiers. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_addfb2(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd2 *r = data; + struct drm_framebuffer *fb; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = drm_internal_framebuffer_create(dev, r, file_priv); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + r->fb_id = fb->base.id; + + /* Transfer ownership to the filp for reaping on close */ + mutex_lock(&file_priv->fbs_lock); + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + + return 0; +} + +struct drm_mode_rmfb_work { + struct work_struct work; + struct list_head fbs; +}; + +static void drm_mode_rmfb_work_fn(struct work_struct *w) +{ + struct drm_mode_rmfb_work *arg = container_of(w, typeof(*arg), work); + + while (!list_empty(&arg->fbs)) { + struct drm_framebuffer *fb = + list_first_entry(&arg->fbs, typeof(*fb), filp_head); + + list_del_init(&fb->filp_head); + drm_framebuffer_remove(fb); + } +} + +/** + * drm_mode_rmfb - remove an FB from the configuration + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Remove the FB specified by the user. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
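For completeness, the userspace side of drm_mode_addfb2() above via libdrm's wrapper: publish an already-allocated single-plane buffer (for example a dumb buffer handle) as an XRGB8888 framebuffer. Sketch only; error handling omitted:

#include <stdint.h>
#include <drm_fourcc.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int create_fb(int fd, uint32_t width, uint32_t height,
		     uint32_t pitch, uint32_t bo_handle, uint32_t *fb_id)
{
	uint32_t handles[4] = { bo_handle };
	uint32_t pitches[4] = { pitch };
	uint32_t offsets[4] = { 0 };

	/* wraps DRM_IOCTL_MODE_ADDFB2 / drm_mode_addfb2() */
	return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}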
+ */ +int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_framebuffer *fb = NULL; + struct drm_framebuffer *fbl = NULL; + uint32_t *id = data; + int found = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, *id); + if (!fb) + return -ENOENT; + + mutex_lock(&file_priv->fbs_lock); + list_for_each_entry(fbl, &file_priv->fbs, filp_head) + if (fb == fbl) + found = 1; + if (!found) { + mutex_unlock(&file_priv->fbs_lock); + goto fail_unref; + } + + list_del_init(&fb->filp_head); + mutex_unlock(&file_priv->fbs_lock); + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_unreference(fb); + + /* + * we now own the reference that was stored in the fbs list + * + * drm_framebuffer_remove may fail with -EINTR on pending signals, + * so run this in a separate stack as there's no way to correctly + * handle this after the fb is already removed from the lookup table. + */ + if (drm_framebuffer_read_refcount(fb) > 1) { + struct drm_mode_rmfb_work arg; + + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); + INIT_LIST_HEAD(&arg.fbs); + list_add_tail(&fb->filp_head, &arg.fbs); + + schedule_work(&arg.work); + flush_work(&arg.work); + destroy_work_on_stack(&arg.work); + } else + drm_framebuffer_unreference(fb); + + return 0; + +fail_unref: + drm_framebuffer_unreference(fb); + return -ENOENT; +} + +/** + * drm_mode_getfb - get FB info + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Lookup the FB given its ID and return info about it. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_fb_cmd *r = data; + struct drm_framebuffer *fb; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, r->fb_id); + if (!fb) + return -ENOENT; + + r->height = fb->height; + r->width = fb->width; + r->depth = fb->depth; + r->bpp = fb->bits_per_pixel; + r->pitch = fb->pitches[0]; + if (fb->funcs->create_handle) { + if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || + drm_is_control_client(file_priv)) { + ret = fb->funcs->create_handle(fb, file_priv, + &r->handle); + } else { + /* GET_FB() is an unprivileged ioctl so we must not + * return a buffer-handle to non-master processes! For + * backwards-compatibility reasons, we cannot make + * GET_FB() privileged, so just return an invalid handle + * for non-masters. */ + r->handle = 0; + ret = 0; + } + } else { + ret = -ENODEV; + } + + drm_framebuffer_unreference(fb); + + return ret; +} + +/** + * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB + * @dev: drm device for the ioctl + * @data: data pointer for the ioctl + * @file_priv: drm file for the ioctl call + * + * Lookup the FB and flush out the damaged area supplied by userspace as a clip + * rectangle list. Generic userspace which does frontbuffer rendering must call + * this ioctl to flush out the changes on manual-update display outputs, e.g. + * usb display-link, mipi manual update panels or edp panel self refresh modes. + * + * Modesetting drivers which always update the frontbuffer do not need to + * implement the corresponding ->dirty framebuffer callback. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
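As a usage note for the GETFB/RMFB ioctls above, a small libdrm-based sketch (fb_id is assumed to exist); the handle comes back as 0 unless the caller is DRM master or has CAP_SYS_ADMIN, exactly as enforced in drm_mode_getfb():

#include <stdio.h>
#include <stdint.h>
#include <xf86drmMode.h>	/* libdrm */

/* Sketch: query an fb's metadata and then remove it again. */
static void inspect_and_remove_fb(int drm_fd, uint32_t fb_id)
{
	drmModeFBPtr fb = drmModeGetFB(drm_fd, fb_id);

	if (!fb)
		return;

	printf("fb %u: %ux%u, pitch %u, bpp %u, handle %u\n",
	       fb->fb_id, fb->width, fb->height, fb->pitch, fb->bpp, fb->handle);
	drmModeFreeFB(fb);

	drmModeRmFB(drm_fd, fb_id);	/* ends up in drm_mode_rmfb() */
}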
+ */ +int drm_mode_dirtyfb_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_clip_rect __user *clips_ptr; + struct drm_clip_rect *clips = NULL; + struct drm_mode_fb_dirty_cmd *r = data; + struct drm_framebuffer *fb; + unsigned flags; + int num_clips; + int ret; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + fb = drm_framebuffer_lookup(dev, r->fb_id); + if (!fb) + return -ENOENT; + + num_clips = r->num_clips; + clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; + + if (!num_clips != !clips_ptr) { + ret = -EINVAL; + goto out_err1; + } + + flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; + + /* If userspace annotates copy, clips must come in pairs */ + if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { + ret = -EINVAL; + goto out_err1; + } + + if (num_clips && clips_ptr) { + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { + ret = -EINVAL; + goto out_err1; + } + clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); + if (!clips) { + ret = -ENOMEM; + goto out_err1; + } + + ret = copy_from_user(clips, clips_ptr, + num_clips * sizeof(*clips)); + if (ret) { + ret = -EFAULT; + goto out_err2; + } + } + + if (fb->funcs->dirty) { + ret = fb->funcs->dirty(fb, file_priv, flags, r->color, + clips, num_clips); + } else { + ret = -ENOSYS; + } + +out_err2: + kfree(clips); +out_err1: + drm_framebuffer_unreference(fb); + + return ret; +} + +/** + * drm_fb_release - remove and free the FBs on this file + * @priv: drm file for the ioctl + * + * Destroy all the FBs associated with @filp. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +void drm_fb_release(struct drm_file *priv) +{ + struct drm_framebuffer *fb, *tfb; + struct drm_mode_rmfb_work arg; + + INIT_LIST_HEAD(&arg.fbs); + + /* + * When the file gets released that means no one else can access the fb + * list any more, so no need to grab fpriv->fbs_lock. And we need to + * avoid upsetting lockdep since the universal cursor code adds a + * framebuffer while holding mutex locks. + * + * Note that a real deadlock between fpriv->fbs_lock and the modeset + * locks is impossible here since no one else but this function can get + * at it any more. + */ + list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { + if (drm_framebuffer_read_refcount(fb) > 1) { + list_move_tail(&fb->filp_head, &arg.fbs); + } else { + list_del_init(&fb->filp_head); + + /* This drops the fpriv->fbs reference. */ + drm_framebuffer_unreference(fb); + } + } + + if (!list_empty(&arg.fbs)) { + INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn); + + schedule_work(&arg.work); + flush_work(&arg.work); + destroy_work_on_stack(&arg.work); + } +} + +void drm_framebuffer_free(struct kref *kref) +{ + struct drm_framebuffer *fb = + container_of(kref, struct drm_framebuffer, base.refcount); + struct drm_device *dev = fb->dev; + + /* + * The lookup idr holds a weak reference, which has not necessarily been + * removed at this point. Check for that. + */ + drm_mode_object_unregister(dev, &fb->base); + + fb->funcs->destroy(fb); +} + +/** + * drm_framebuffer_init - initialize a framebuffer + * @dev: DRM device + * @fb: framebuffer to be initialized + * @funcs: ... with these functions + * + * Allocates an ID for the framebuffer's parent mode object, sets its mode + * functions & device file and adds it to the master fd list. 
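For the DIRTYFB path above, a minimal userspace sketch flushing one damaged rectangle of a frontbuffer-rendered fb; the fd and fb_id are assumed, and a -ENOSYS return simply means the driver has no ->dirty hook:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int flush_damage(int drm_fd, uint32_t fb_id,
			uint16_t x1, uint16_t y1, uint16_t x2, uint16_t y2)
{
	struct drm_clip_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
	struct drm_mode_fb_dirty_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.fb_id = fb_id;
	cmd.num_clips = 1;
	cmd.clips_ptr = (uintptr_t)&clip;	/* kernel copies the clip list */

	return ioctl(drm_fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}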
+ * + * IMPORTANT: + * This functions publishes the fb and makes it available for concurrent access + * by other users. Which means by this point the fb _must_ be fully set up - + * since all the fb attributes are invariant over its lifetime, no further + * locking but only correct reference counting is required. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_framebuffer_funcs *funcs) +{ + int ret; + + INIT_LIST_HEAD(&fb->filp_head); + fb->dev = dev; + fb->funcs = funcs; + + ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB, + false, drm_framebuffer_free); + if (ret) + goto out; + + mutex_lock(&dev->mode_config.fb_lock); + dev->mode_config.num_fb++; + list_add(&fb->head, &dev->mode_config.fb_list); + mutex_unlock(&dev->mode_config.fb_lock); + + drm_mode_object_register(dev, &fb->base); +out: + return ret; +} +EXPORT_SYMBOL(drm_framebuffer_init); + +/** + * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference + * @dev: drm device + * @id: id of the fb object + * + * If successful, this grabs an additional reference to the framebuffer - + * callers need to make sure to eventually unreference the returned framebuffer + * again, using @drm_framebuffer_unreference. + */ +struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, + uint32_t id) +{ + struct drm_mode_object *obj; + struct drm_framebuffer *fb = NULL; + + obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_FB); + if (obj) + fb = obj_to_fb(obj); + return fb; +} +EXPORT_SYMBOL(drm_framebuffer_lookup); + +/** + * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr + * @fb: fb to unregister + * + * Drivers need to call this when cleaning up driver-private framebuffers, e.g. + * those used for fbdev. Note that the caller must hold a reference of it's own, + * i.e. the object may not be destroyed through this call (since it'll lead to a + * locking inversion). + */ +void drm_framebuffer_unregister_private(struct drm_framebuffer *fb) +{ + struct drm_device *dev; + + if (!fb) + return; + + dev = fb->dev; + + /* Mark fb as reaped and drop idr ref. */ + drm_mode_object_unregister(dev, &fb->base); +} +EXPORT_SYMBOL(drm_framebuffer_unregister_private); + +/** + * drm_framebuffer_cleanup - remove a framebuffer object + * @fb: framebuffer to remove + * + * Cleanup framebuffer. This function is intended to be used from the drivers + * ->destroy callback. It can also be used to clean up driver private + * framebuffers embedded into a larger structure. + * + * Note that this function does not remove the fb from active usuage - if it is + * still used anywhere, hilarity can ensue since userspace could call getfb on + * the id and get back -EINVAL. Obviously no concern at driver unload time. + * + * Also, the framebuffer will not be removed from the lookup idr - for + * user-created framebuffers this will happen in in the rmfb ioctl. For + * driver-private objects (e.g. for fbdev) drivers need to explicitly call + * drm_framebuffer_unregister_private. 
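A short kernel-side sketch of the lookup/unreference contract documented for drm_framebuffer_lookup() above; the foo_ helper is made up for illustration:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Sketch: every successful lookup must be paired with an unreference. */
static bool foo_fb_is_wide(struct drm_device *dev, uint32_t fb_id)
{
	struct drm_framebuffer *fb;
	bool wide;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* grabs a reference */
	if (!fb)
		return false;

	wide = fb->width > 1920;

	drm_framebuffer_unreference(fb);		/* drop it again */
	return wide;
}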
+ */ +void drm_framebuffer_cleanup(struct drm_framebuffer *fb) +{ + struct drm_device *dev = fb->dev; + + mutex_lock(&dev->mode_config.fb_lock); + list_del(&fb->head); + dev->mode_config.num_fb--; + mutex_unlock(&dev->mode_config.fb_lock); +} +EXPORT_SYMBOL(drm_framebuffer_cleanup); + +/** + * drm_framebuffer_remove - remove and unreference a framebuffer object + * @fb: framebuffer to remove + * + * Scans all the CRTCs and planes in @dev's mode_config. If they're + * using @fb, removes it, setting it to NULL. Then drops the reference to the + * passed-in framebuffer. Might take the modeset locks. + * + * Note that this function optimizes the cleanup away if the caller holds the + * last reference to the framebuffer. It is also guaranteed to not take the + * modeset locks in this case. + */ +void drm_framebuffer_remove(struct drm_framebuffer *fb) +{ + struct drm_device *dev; + struct drm_crtc *crtc; + struct drm_plane *plane; + + if (!fb) + return; + + dev = fb->dev; + + WARN_ON(!list_empty(&fb->filp_head)); + + /* + * drm ABI mandates that we remove any deleted framebuffers from active + * useage. But since most sane clients only remove framebuffers they no + * longer need, try to optimize this away. + * + * Since we're holding a reference ourselves, observing a refcount of 1 + * means that we're the last holder and can skip it. Also, the refcount + * can never increase from 1 again, so we don't need any barriers or + * locks. + * + * Note that userspace could try to race with use and instate a new + * usage _after_ we've cleared all current ones. End result will be an + * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot + * in this manner. + */ + if (drm_framebuffer_read_refcount(fb) > 1) { + drm_modeset_lock_all(dev); + /* remove from any CRTC */ + drm_for_each_crtc(crtc, dev) { + if (crtc->primary->fb == fb) { + /* should turn off the crtc */ + if (drm_crtc_force_disable(crtc)) + DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); + } + } + + drm_for_each_plane(plane, dev) { + if (plane->fb == fb) + drm_plane_force_disable(plane); + } + drm_modeset_unlock_all(dev); + } + + drm_framebuffer_unreference(fb); +} +EXPORT_SYMBOL(drm_framebuffer_remove); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index 7b30b307674b..dae18e58e79b 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -142,7 +142,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it unsigned long add) { int ret; - unsigned long mask = (1 << bits) - 1; + unsigned long mask = (1UL << bits) - 1; unsigned long first, unshifted_key; unshifted_key = hash_long(seed, bits); diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 57676f8d7ecf..32a489b0faff 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -346,6 +346,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, struct drm_stats __user *stats; int i, err; + memset(&s32, 0, sizeof(drm_stats32_t)); stats = compat_alloc_user_space(sizeof(*stats)); if (!stats) return -EFAULT; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 33af4a5ddca1..b97c421c65f4 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -189,9 +189,8 @@ static int drm_getclient(struct drm_device *dev, void *data, */ if (client->idx == 0) { client->auth = file_priv->authenticated; - client->pid = pid_vnr(file_priv->pid); - client->uid = from_kuid_munged(current_user_ns(), - 
file_priv->uid); + client->pid = task_pid_vnr(current); + client->uid = overflowuid; client->magic = 0; client->iocs = 0; @@ -228,6 +227,7 @@ static int drm_getstats(struct drm_device *dev, void *data, static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_get_cap *req = data; + struct drm_crtc *crtc; req->value = 0; switch (req->capability) { @@ -254,6 +254,13 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ case DRM_CAP_ASYNC_PAGE_FLIP: req->value = dev->mode_config.async_page_flip; break; + case DRM_CAP_PAGE_FLIP_TARGET: + req->value = 1; + drm_for_each_crtc(crtc, dev) { + if (!crtc->funcs->page_flip_target) + req->value = 0; + } + break; case DRM_CAP_CURSOR_WIDTH: if (dev->mode_config.cursor_width) req->value = dev->mode_config.cursor_width; @@ -714,9 +721,9 @@ long drm_ioctl(struct file *filp, if (ksize > in_size) memset(kdata + in_size, 0, ksize - in_size); - /* Enforce sane locking for kms driver ioctls. Core ioctls are + /* Enforce sane locking for modern driver ioctls. Core ioctls are * too messy still. */ - if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) || + if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) || (ioctl->flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 77f357b2c386..404a1ce7730c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -482,7 +482,7 @@ int drm_irq_install(struct drm_device *dev, int irq) return ret; } - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); /* After installing handler */ @@ -491,7 +491,7 @@ int drm_irq_install(struct drm_device *dev, int irq) if (ret < 0) { dev->irq_enabled = false; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) vga_client_register(dev->pdev, NULL, NULL, NULL); free_irq(irq, dev); } else { @@ -557,7 +557,7 @@ int drm_irq_uninstall(struct drm_device *dev) DRM_DEBUG("irq=%d\n", dev->irq); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) vga_client_register(dev->pdev, NULL, NULL, NULL); if (dev->driver->irq_uninstall) @@ -592,7 +592,7 @@ int drm_control(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return 0; /* UMS was only ever supported on pci devices. */ if (WARN_ON(!dev->pdev)) @@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants); * Negative value on error, failure or if not supported in current * video mode: * - * -EINVAL - Invalid CRTC. - * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. - * -ENOTSUPP - Function not supported in current display mode. - * -EIO - Failed, e.g., due to failed scanout position query. + * -EINVAL Invalid CRTC. + * -EAGAIN Temporary unavailable, e.g., called before initial modeset. + * -ENOTSUPP Function not supported in current display mode. + * -EIO Failed, e.g., due to failed scanout position query. 
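For the new DRM_CAP_PAGE_FLIP_TARGET capability above, a userspace sketch using the raw GET_CAP ioctl; the fallback #define mirrors the uapi value added in this series and is only needed with older installed headers:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

#ifndef DRM_CAP_PAGE_FLIP_TARGET
#define DRM_CAP_PAGE_FLIP_TARGET 0x11	/* uapi value introduced here */
#endif

/* Returns 1 if every CRTC supports target-vblank page flips, 0 otherwise. */
static int has_page_flip_target(int drm_fd)
{
	struct drm_get_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.capability = DRM_CAP_PAGE_FLIP_TARGET;

	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap))
		return 0;

	return cap.value == 1;
}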
* * Returns or'ed positive status flags on success: * @@ -1295,7 +1295,7 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe) if (e->pipe != pipe) continue; DRM_DEBUG("Sending premature vblank event on disable: " - "wanted %d, current %d\n", + "wanted %u, current %u\n", e->event.sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); @@ -1519,7 +1519,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, return 0; /* KMS drivers handle this internally */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return 0; pipe = modeset->crtc; @@ -1585,7 +1585,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, seq = drm_vblank_count_and_time(dev, pipe, &now); - DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", + DRM_DEBUG("event on vblank count %u, current %u, crtc %u\n", vblwait->request.sequence, seq, pipe); trace_drm_vblank_event_queued(current->pid, pipe, @@ -1693,7 +1693,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); } - DRM_DEBUG("waiting on vblank count %d, crtc %u\n", + DRM_DEBUG("waiting on vblank count %u, crtc %u\n", vblwait->request.sequence, pipe); DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, (((drm_vblank_count(dev, pipe) - @@ -1708,7 +1708,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; - DRM_DEBUG("returning %d to client\n", + DRM_DEBUG("returning %u to client\n", vblwait->reply.sequence); } else { DRM_DEBUG("vblank wait interrupted by signal\n"); @@ -1735,7 +1735,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) if ((seq - e->event.sequence) > (1<<23)) continue; - DRM_DEBUG("vblank event on %d, current %d\n", + DRM_DEBUG("vblank event on %u, current %u\n", e->event.sequence, seq); list_del(&e->base.link); @@ -1826,6 +1826,7 @@ EXPORT_SYMBOL(drm_crtc_handle_vblank); */ u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe) { + WARN_ON_ONCE(dev->max_vblank_count != 0); return 0; } EXPORT_SYMBOL(drm_vblank_no_hw_counter); diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c index 3187c4bb01cb..45db36cd3d20 100644 --- a/drivers/gpu/drm/drm_kms_helper_common.c +++ b/drivers/gpu/drm/drm_kms_helper_common.c @@ -27,7 +27,8 @@ #include <drm/drmP.h> #include <drm/drm_fb_helper.h> -#include <drm/drm_dp_aux_dev.h> + +#include "drm_crtc_helper_internal.h" MODULE_AUTHOR("David Airlie, Jesse Barnes"); MODULE_DESCRIPTION("DRM KMS helper"); diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index 48ac0ebbd663..c901f3c5b269 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -163,7 +163,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data, struct drm_master *master = file_priv->master; int ret = 0; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; ++file_priv->lock_count; @@ -252,7 +252,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_lock *lock = data; struct drm_master *master = file_priv->master; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (lock->context == DRM_KERNEL_CONTEXT) { diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index cb39f45d6a16..11d44a1e0ab3 100644 --- 
a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -46,6 +46,7 @@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/export.h> +#include <linux/interval_tree_generic.h> /** * DOC: Overview @@ -103,6 +104,72 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ u64 end, enum drm_mm_search_flags flags); +#define START(node) ((node)->start) +#define LAST(node) ((node)->start + (node)->size - 1) + +INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, + u64, __subtree_last, + START, LAST, static inline, drm_mm_interval_tree) + +struct drm_mm_node * +drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last) +{ + return drm_mm_interval_tree_iter_first(&mm->interval_tree, + start, last); +} +EXPORT_SYMBOL(drm_mm_interval_first); + +struct drm_mm_node * +drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last) +{ + return drm_mm_interval_tree_iter_next(node, start, last); +} +EXPORT_SYMBOL(drm_mm_interval_next); + +static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, + struct drm_mm_node *node) +{ + struct drm_mm *mm = hole_node->mm; + struct rb_node **link, *rb; + struct drm_mm_node *parent; + + node->__subtree_last = LAST(node); + + if (hole_node->allocated) { + rb = &hole_node->rb; + while (rb) { + parent = rb_entry(rb, struct drm_mm_node, rb); + if (parent->__subtree_last >= node->__subtree_last) + break; + + parent->__subtree_last = node->__subtree_last; + rb = rb_parent(rb); + } + + rb = &hole_node->rb; + link = &hole_node->rb.rb_right; + } else { + rb = NULL; + link = &mm->interval_tree.rb_node; + } + + while (*link) { + rb = *link; + parent = rb_entry(rb, struct drm_mm_node, rb); + if (parent->__subtree_last < node->__subtree_last) + parent->__subtree_last = node->__subtree_last; + if (node->start < parent->start) + link = &parent->rb.rb_left; + else + link = &parent->rb.rb_right; + } + + rb_link_node(&node->rb, rb, link); + rb_insert_augmented(&node->rb, + &mm->interval_tree, + &drm_mm_interval_tree_augment); +} + static void drm_mm_insert_helper(struct drm_mm_node *hole_node, struct drm_mm_node *node, u64 size, unsigned alignment, @@ -150,9 +217,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, node->color = color; node->allocated = 1; - INIT_LIST_HEAD(&node->hole_stack); list_add(&node->node_list, &hole_node->node_list); + drm_mm_interval_tree_add_node(hole_node, node); + BUG_ON(node->start + node->size > adj_end); node->hole_follows = 0; @@ -178,41 +246,54 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { + u64 end = node->start + node->size; struct drm_mm_node *hole; - u64 end; - u64 hole_start; - u64 hole_end; + u64 hole_start, hole_end; - BUG_ON(node == NULL); + if (WARN_ON(node->size == 0)) + return -EINVAL; end = node->start + node->size; /* Find the relevant hole to add our node to */ - drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { - if (hole_start > node->start || hole_end < end) - continue; + hole = drm_mm_interval_tree_iter_first(&mm->interval_tree, + node->start, ~(u64)0); + if (hole) { + if (hole->start < end) + return -ENOSPC; + } else { + hole = list_entry(&mm->head_node.node_list, + typeof(*hole), node_list); + } - node->mm = mm; - node->allocated = 1; + hole = list_last_entry(&hole->node_list, typeof(*hole), node_list); + if (!hole->hole_follows) + return -ENOSPC; - INIT_LIST_HEAD(&node->hole_stack); - list_add(&node->node_list, &hole->node_list); + hole_start = 
__drm_mm_hole_node_start(hole); + hole_end = __drm_mm_hole_node_end(hole); + if (hole_start > node->start || hole_end < end) + return -ENOSPC; - if (node->start == hole_start) { - hole->hole_follows = 0; - list_del_init(&hole->hole_stack); - } + node->mm = mm; + node->allocated = 1; - node->hole_follows = 0; - if (end != hole_end) { - list_add(&node->hole_stack, &mm->hole_stack); - node->hole_follows = 1; - } + list_add(&node->node_list, &hole->node_list); - return 0; + drm_mm_interval_tree_add_node(hole, node); + + if (node->start == hole_start) { + hole->hole_follows = 0; + list_del(&hole->hole_stack); } - return -ENOSPC; + node->hole_follows = 0; + if (end != hole_end) { + list_add(&node->hole_stack, &mm->hole_stack); + node->hole_follows = 1; + } + + return 0; } EXPORT_SYMBOL(drm_mm_reserve_node); @@ -239,6 +320,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, { struct drm_mm_node *hole_node; + if (WARN_ON(size == 0)) + return -EINVAL; + hole_node = drm_mm_search_free_generic(mm, size, alignment, color, sflags); if (!hole_node) @@ -299,9 +383,10 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, node->color = color; node->allocated = 1; - INIT_LIST_HEAD(&node->hole_stack); list_add(&node->node_list, &hole_node->node_list); + drm_mm_interval_tree_add_node(hole_node, node); + BUG_ON(node->start < start); BUG_ON(node->start < adj_start); BUG_ON(node->start + node->size > adj_end); @@ -340,6 +425,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n { struct drm_mm_node *hole_node; + if (WARN_ON(size == 0)) + return -EINVAL; + hole_node = drm_mm_search_free_in_range_generic(mm, size, alignment, color, start, end, sflags); @@ -390,6 +478,7 @@ void drm_mm_remove_node(struct drm_mm_node *node) } else list_move(&prev_node->hole_stack, &mm->hole_stack); + drm_mm_interval_tree_remove(node, &mm->interval_tree); list_del(&node->node_list); node->allocated = 0; } @@ -516,11 +605,13 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { list_replace(&old->node_list, &new->node_list); list_replace(&old->hole_stack, &new->hole_stack); + rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); new->hole_follows = old->hole_follows; new->mm = old->mm; new->start = old->start; new->size = old->size; new->color = old->color; + new->__subtree_last = old->__subtree_last; old->allocated = 0; new->allocated = 1; @@ -748,7 +839,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) /* Clever trick to avoid a special case in the free hole tracking. 
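To show how the reworked drm_mm_reserve_node() and the new interval-tree lookups fit together, a kernel-side sketch assuming an allocator already set up with drm_mm_init(); the foo_ helper is illustrative only:

#include <linux/string.h>
#include <drm/drmP.h>
#include <drm/drm_mm.h>

/* Sketch: carve out a fixed range, then walk everything overlapping it. */
static int foo_reserve_and_walk(struct drm_mm *mm, struct drm_mm_node *node,
				u64 start, u64 size)
{
	struct drm_mm_node *it;
	int ret;

	memset(node, 0, sizeof(*node));
	node->start = start;
	node->size = size;

	ret = drm_mm_reserve_node(mm, node);	/* -ENOSPC if the range is busy */
	if (ret)
		return ret;

	/* New in this series: O(log n) overlap lookup via the interval tree. */
	for (it = drm_mm_interval_first(mm, start, start + size - 1);
	     it;
	     it = drm_mm_interval_next(it, start, start + size - 1))
		DRM_DEBUG("node [%llx + %llx]\n", it->start, it->size);

	return 0;
}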
*/ INIT_LIST_HEAD(&mm->head_node.node_list); - INIT_LIST_HEAD(&mm->head_node.hole_stack); mm->head_node.hole_follows = 1; mm->head_node.scanned_block = 0; mm->head_node.scanned_prev_free = 0; @@ -758,6 +848,8 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) mm->head_node.size = start - mm->head_node.start; list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack); + mm->interval_tree = RB_ROOT; + mm->color_adjust = NULL; } EXPORT_SYMBOL(drm_mm_init); diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c new file mode 100644 index 000000000000..6edda8382a4c --- /dev/null +++ b/drivers/gpu/drm/drm_mode_object.c @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include <drm/drmP.h> +#include <drm/drm_mode_object.h> + +#include "drm_crtc_internal.h" + +/* + * Internal function to assign a slot in the object idr and optionally + * register the object into the idr. + */ +int drm_mode_object_get_reg(struct drm_device *dev, + struct drm_mode_object *obj, + uint32_t obj_type, + bool register_obj, + void (*obj_free_cb)(struct kref *kref)) +{ + int ret; + + mutex_lock(&dev->mode_config.idr_mutex); + ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL); + if (ret >= 0) { + /* + * Set up the object linking under the protection of the idr + * lock so that other users can't see inconsistent state. + */ + obj->id = ret; + obj->type = obj_type; + if (obj_free_cb) { + obj->free_cb = obj_free_cb; + kref_init(&obj->refcount); + } + } + mutex_unlock(&dev->mode_config.idr_mutex); + + return ret < 0 ? ret : 0; +} + +/** + * drm_mode_object_get - allocate a new modeset identifier + * @dev: DRM device + * @obj: object pointer, used to generate unique ID + * @obj_type: object type + * + * Create a unique identifier based on @ptr in @dev's identifier space. Used + * for tracking modes, CRTCs and connectors. Note that despite the _get postfix + * modeset identifiers are _not_ reference counted. Hence don't use this for + * reference counted modeset objects like framebuffers. + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_mode_object_get(struct drm_device *dev, + struct drm_mode_object *obj, uint32_t obj_type) +{ + return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL); +} + +void drm_mode_object_register(struct drm_device *dev, + struct drm_mode_object *obj) +{ + mutex_lock(&dev->mode_config.idr_mutex); + idr_replace(&dev->mode_config.crtc_idr, obj, obj->id); + mutex_unlock(&dev->mode_config.idr_mutex); +} + +/** + * drm_mode_object_unregister - free a modeset identifer + * @dev: DRM device + * @object: object to free + * + * Free @id from @dev's unique identifier pool. + * This function can be called multiple times, and guards against + * multiple removals. + * These modeset identifiers are _not_ reference counted. Hence don't use this + * for reference counted modeset objects like framebuffers. + */ +void drm_mode_object_unregister(struct drm_device *dev, + struct drm_mode_object *object) +{ + mutex_lock(&dev->mode_config.idr_mutex); + if (object->id) { + idr_remove(&dev->mode_config.crtc_idr, object->id); + object->id = 0; + } + mutex_unlock(&dev->mode_config.idr_mutex); +} + +struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev, + uint32_t id, uint32_t type) +{ + struct drm_mode_object *obj = NULL; + + mutex_lock(&dev->mode_config.idr_mutex); + obj = idr_find(&dev->mode_config.crtc_idr, id); + if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) + obj = NULL; + if (obj && obj->id != id) + obj = NULL; + + if (obj && obj->free_cb) { + if (!kref_get_unless_zero(&obj->refcount)) + obj = NULL; + } + mutex_unlock(&dev->mode_config.idr_mutex); + + return obj; +} + +/** + * drm_mode_object_find - look up a drm object with static lifetime + * @dev: drm device + * @id: id of the mode object + * @type: type of the mode object + * + * This function is used to look up a modeset object. It will acquire a + * reference for reference counted objects. This reference must be dropped again + * by callind drm_mode_object_unreference(). + */ +struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, + uint32_t id, uint32_t type) +{ + struct drm_mode_object *obj = NULL; + + obj = __drm_mode_object_find(dev, id, type); + return obj; +} +EXPORT_SYMBOL(drm_mode_object_find); + +/** + * drm_mode_object_unreference - decr the object refcnt + * @obj: mode_object + * + * This function decrements the object's refcount if it is a refcounted modeset + * object. It is a no-op on any other object. This is used to drop references + * acquired with drm_mode_object_reference(). + */ +void drm_mode_object_unreference(struct drm_mode_object *obj) +{ + if (obj->free_cb) { + DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); + kref_put(&obj->refcount, obj->free_cb); + } +} +EXPORT_SYMBOL(drm_mode_object_unreference); + +/** + * drm_mode_object_reference - incr the object refcnt + * @obj: mode_object + * + * This function increments the object's refcount if it is a refcounted modeset + * object. It is a no-op on any other object. References should be dropped again + * by calling drm_mode_object_unreference(). 
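A sketch of a type-filtered lookup through drm_mode_object_find(), with a comment noting the refcounted vs. static-lifetime distinction described above; foo_find_crtc is a made-up example:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static struct drm_crtc *foo_find_crtc(struct drm_device *dev, uint32_t id)
{
	struct drm_mode_object *obj;

	obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
	if (!obj)
		return NULL;

	/*
	 * CRTCs carry no free_cb, so no extra reference was taken here; for
	 * refcounted objects (e.g. framebuffers) the caller would have to
	 * drop it again with drm_mode_object_unreference().
	 */
	return container_of(obj, struct drm_crtc, base);
}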
+ */ +void drm_mode_object_reference(struct drm_mode_object *obj) +{ + if (obj->free_cb) { + DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount)); + kref_get(&obj->refcount); + } +} +EXPORT_SYMBOL(drm_mode_object_reference); + +/** + * drm_object_attach_property - attach a property to a modeset object + * @obj: drm modeset object + * @property: property to attach + * @init_val: initial value of the property + * + * This attaches the given property to the modeset object with the given initial + * value. Currently this function cannot fail since the properties are stored in + * a statically sized array. + */ +void drm_object_attach_property(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t init_val) +{ + int count = obj->properties->count; + + if (count == DRM_OBJECT_MAX_PROPERTY) { + WARN(1, "Failed to attach object property (type: 0x%x). Please " + "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time " + "you see this message on the same object type.\n", + obj->type); + return; + } + + obj->properties->properties[count] = property; + obj->properties->values[count] = init_val; + obj->properties->count++; +} +EXPORT_SYMBOL(drm_object_attach_property); + +/** + * drm_object_property_set_value - set the value of a property + * @obj: drm mode object to set property value for + * @property: property to set + * @val: value the property should be set to + * + * This function sets a given property on a given object. This function only + * changes the software state of the property, it does not call into the + * driver's ->set_property callback. + * + * Note that atomic drivers should not have any need to call this, the core will + * ensure consistency of values reported back to userspace through the + * appropriate ->atomic_get_property callback. Only legacy drivers should call + * this function to update the tracked value (after clamping and other + * restrictions have been applied). + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_object_property_set_value(struct drm_mode_object *obj, + struct drm_property *property, uint64_t val) +{ + int i; + + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->properties[i] == property) { + obj->properties->values[i] = val; + return 0; + } + } + + return -EINVAL; +} +EXPORT_SYMBOL(drm_object_property_set_value); + +/** + * drm_object_property_get_value - retrieve the value of a property + * @obj: drm mode object to get property value from + * @property: property to retrieve + * @val: storage for the property value + * + * This function retrieves the softare state of the given property for the given + * property. Since there is no driver callback to retrieve the current property + * value this might be out of sync with the hardware, depending upon the driver + * and property. + * + * Atomic drivers should never call this function directly, the core will read + * out property values through the various ->atomic_get_property callbacks. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_object_property_get_value(struct drm_mode_object *obj, + struct drm_property *property, uint64_t *val) +{ + int i; + + /* read-only properties bypass atomic mechanism and still store + * their value in obj->properties->values[].. 
mostly to avoid + * having to deal w/ EDID and similar props in atomic paths: + */ + if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) && + !(property->flags & DRM_MODE_PROP_IMMUTABLE)) + return drm_atomic_get_property(obj, property, val); + + for (i = 0; i < obj->properties->count; i++) { + if (obj->properties->properties[i] == property) { + *val = obj->properties->values[i]; + return 0; + } + + } + + return -EINVAL; +} +EXPORT_SYMBOL(drm_object_property_get_value); + +/* helper for getconnector and getproperties ioctls */ +int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic, + uint32_t __user *prop_ptr, + uint64_t __user *prop_values, + uint32_t *arg_count_props) +{ + int i, ret, count; + + for (i = 0, count = 0; i < obj->properties->count; i++) { + struct drm_property *prop = obj->properties->properties[i]; + uint64_t val; + + if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) + continue; + + if (*arg_count_props > count) { + ret = drm_object_property_get_value(obj, prop, &val); + if (ret) + return ret; + + if (put_user(prop->base.id, prop_ptr + count)) + return -EFAULT; + + if (put_user(val, prop_values + count)) + return -EFAULT; + } + + count++; + } + *arg_count_props = count; + + return 0; +} + +/** + * drm_mode_obj_get_properties_ioctl - get the current value of a object's property + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * This function retrieves the current value for an object's property. Compared + * to the connector specific ioctl this one is extended to also work on crtc and + * plane objects. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_obj_get_properties *arg = data; + struct drm_mode_object *obj; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + drm_modeset_lock_all(dev); + + obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); + if (!obj) { + ret = -ENOENT; + goto out; + } + if (!obj->properties) { + ret = -EINVAL; + goto out_unref; + } + + ret = drm_mode_object_get_properties(obj, file_priv->atomic, + (uint32_t __user *)(unsigned long)(arg->props_ptr), + (uint64_t __user *)(unsigned long)(arg->prop_values_ptr), + &arg->count_props); + +out_unref: + drm_mode_object_unreference(obj); +out: + drm_modeset_unlock_all(dev); + return ret; +} + +int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_mode_obj_set_property *arg = data; + struct drm_mode_object *arg_obj; + struct drm_mode_object *prop_obj; + struct drm_property *property; + int i, ret = -EINVAL; + struct drm_mode_object *ref; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + drm_modeset_lock_all(dev); + + arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); + if (!arg_obj) { + ret = -ENOENT; + goto out; + } + if (!arg_obj->properties) + goto out_unref; + + for (i = 0; i < arg_obj->properties->count; i++) + if (arg_obj->properties->properties[i]->base.id == arg->prop_id) + break; + + if (i == arg_obj->properties->count) + goto out_unref; + + prop_obj = drm_mode_object_find(dev, arg->prop_id, + DRM_MODE_OBJECT_PROPERTY); + if (!prop_obj) { + ret = -ENOENT; + goto out_unref; + } + property = obj_to_property(prop_obj); + + if (!drm_property_change_valid_get(property, arg->value, &ref)) + goto out_unref; + + switch 
(arg_obj->type) { + case DRM_MODE_OBJECT_CONNECTOR: + ret = drm_mode_connector_set_obj_prop(arg_obj, property, + arg->value); + break; + case DRM_MODE_OBJECT_CRTC: + ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value); + break; + case DRM_MODE_OBJECT_PLANE: + ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj), + property, arg->value); + break; + } + + drm_property_change_valid_put(property, ref); + +out_unref: + drm_mode_object_unreference(arg_obj); +out: + drm_modeset_unlock_all(dev); + return ret; +} diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index fc5040ae5f25..53f07ac7c174 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -657,11 +657,36 @@ void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, } EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode); +/** + * drm_bus_flags_from_videomode - extract information about pixelclk and + * DE polarity from videomode and store it in a separate variable + * @vm: videomode structure to use + * @bus_flags: information about pixelclk and DE polarity will be stored here + * + * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE + * in @bus_flags according to DISPLAY_FLAGS found in @vm + */ +void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags) +{ + *bus_flags = 0; + if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) + *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE; + if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) + *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE; + + if (vm->flags & DISPLAY_FLAGS_DE_LOW) + *bus_flags |= DRM_BUS_FLAG_DE_LOW; + if (vm->flags & DISPLAY_FLAGS_DE_HIGH) + *bus_flags |= DRM_BUS_FLAG_DE_HIGH; +} +EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode); + #ifdef CONFIG_OF /** * of_get_drm_display_mode - get a drm_display_mode from devicetree * @np: device_node with the timing specification * @dmode: will be set to the return value + * @bus_flags: information about pixelclk and DE polarity * @index: index into the list of display timings in devicetree * * This function is expensive and should only be used, if only one mode is to be @@ -672,7 +697,8 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode); * 0 on success, a negative errno code when no of videomode node was found. */ int of_get_drm_display_mode(struct device_node *np, - struct drm_display_mode *dmode, int index) + struct drm_display_mode *dmode, u32 *bus_flags, + int index) { struct videomode vm; int ret; @@ -682,6 +708,8 @@ int of_get_drm_display_mode(struct device_node *np, return ret; drm_display_mode_from_videomode(&vm, dmode); + if (bus_flags) + drm_bus_flags_from_videomode(&vm, bus_flags); pr_debug("%s: got %dx%d display mode from %s\n", of_node_full_name(np), vm.hactive, vm.vactive, np->name); diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c new file mode 100644 index 000000000000..1d45738f8f98 --- /dev/null +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. 
The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <drm/drm_modeset_helper.h> +#include <drm/drm_plane_helper.h> + +/** + * DOC: aux kms helpers + * + * This helper library contains various one-off functions which don't really fit + * anywhere else in the DRM modeset helper library. + */ + +/** + * drm_helper_move_panel_connectors_to_head() - move panels to the front in the + * connector list + * @dev: drm device to operate on + * + * Some userspace presumes that the first connected connector is the main + * display, where it's supposed to display e.g. the login screen. For + * laptops, this should be the main panel. Use this function to sort all + * (eDP/LVDS) panels to the front of the connector list, instead of + * painstakingly trying to initialize them in the right order. + */ +void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) +{ + struct drm_connector *connector, *tmp; + struct list_head panel_list; + + INIT_LIST_HEAD(&panel_list); + + list_for_each_entry_safe(connector, tmp, + &dev->mode_config.connector_list, head) { + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + list_move_tail(&connector->head, &panel_list); + } + + list_splice(&panel_list, &dev->mode_config.connector_list); +} +EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); + +/** + * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata + * @fb: drm_framebuffer object to fill out + * @mode_cmd: metadata from the userspace fb creation request + * + * This helper can be used in a drivers fb_create callback to pre-fill the fb's + * metadata fields. + */ +void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int i; + + fb->width = mode_cmd->width; + fb->height = mode_cmd->height; + for (i = 0; i < 4; i++) { + fb->pitches[i] = mode_cmd->pitches[i]; + fb->offsets[i] = mode_cmd->offsets[i]; + fb->modifier[i] = mode_cmd->modifier[i]; + } + drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, + &fb->bits_per_pixel); + fb->pixel_format = mode_cmd->pixel_format; + fb->flags = mode_cmd->flags; +} +EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); + +/* + * This is the minimal list of formats that seem to be safe for modeset use + * with all current DRM drivers. Most hardware can actually support more + * formats than this and drivers may specify a more accurate list when + * creating the primary plane. However drivers that still call + * drm_plane_init() will use this minimal format list as the default. 
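To illustrate the intended use of drm_helper_mode_fill_fb_struct() above, a hypothetical driver .fb_create sketch (GEM handle lookup and format validation omitted; all foo_ names are made up):

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper.h>

static void foo_fb_destroy(struct drm_framebuffer *fb)
{
	drm_framebuffer_cleanup(fb);
	kfree(fb);
}

static const struct drm_framebuffer_funcs foo_fb_funcs = {
	.destroy = foo_fb_destroy,
};

static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	int ret;

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	/* Copy width/height/pitches/format from the userspace request. */
	drm_helper_mode_fill_fb_struct(fb, mode_cmd);

	/* Publishes the fb; everything must be fully set up by now. */
	ret = drm_framebuffer_init(dev, fb, &foo_fb_funcs);
	if (ret) {
		kfree(fb);
		return ERR_PTR(ret);
	}

	return fb;
}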
+ */ +static const uint32_t safe_modeset_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static struct drm_plane *create_primary_plane(struct drm_device *dev) +{ + struct drm_plane *primary; + int ret; + + primary = kzalloc(sizeof(*primary), GFP_KERNEL); + if (primary == NULL) { + DRM_DEBUG_KMS("Failed to allocate primary plane\n"); + return NULL; + } + + /* + * Remove the format_default field from drm_plane when dropping + * this helper. + */ + primary->format_default = true; + + /* possible_crtc's will be filled in later by crtc_init */ + ret = drm_universal_plane_init(dev, primary, 0, + &drm_primary_helper_funcs, + safe_modeset_formats, + ARRAY_SIZE(safe_modeset_formats), + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + kfree(primary); + primary = NULL; + } + + return primary; +} + +/** + * drm_crtc_init - Legacy CRTC initialization function + * @dev: DRM device + * @crtc: CRTC object to init + * @funcs: callbacks for the new CRTC + * + * Initialize a CRTC object with a default helper-provided primary plane and no + * cursor plane. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs) +{ + struct drm_plane *primary; + + primary = create_primary_plane(dev); + return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, + NULL); +} +EXPORT_SYMBOL(drm_crtc_init); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index b2f8f1062d5f..d86362fc8ac6 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -175,7 +175,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, { struct drm_irq_busid *p = data; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; /* UMS was only ever support on PCI devices. */ @@ -263,7 +263,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, /* No locking needed since shadow-attach is single-threaded since it may * only be called from the per-driver module init hook. */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_LEGACY)) list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list); return 0; @@ -299,7 +299,7 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) DRM_DEBUG("\n"); - if (driver->driver_features & DRIVER_MODESET) + if (!(driver->driver_features & DRIVER_LEGACY)) return pci_register_driver(pdriver); /* If not using KMS, fall back to stealth mode manual scanning. */ @@ -421,7 +421,7 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) struct drm_device *dev, *tmp; DRM_DEBUG("\n"); - if (driver->driver_features & DRIVER_MODESET) { + if (!(driver->driver_features & DRIVER_LEGACY)) { pci_unregister_driver(pdriver); } else { list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 16c4a7bd7465..7899fc1dcdb0 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -64,18 +64,6 @@ */ /* - * This is the minimal list of formats that seem to be safe for modeset use - * with all current DRM drivers. Most hardware can actually support more - * formats than this and drivers may specify a more accurate list when - * creating the primary plane. However drivers that still call - * drm_plane_init() will use this minimal format list as the default. 
- */ -static const uint32_t safe_modeset_formats[] = { - DRM_FORMAT_XRGB8888, - DRM_FORMAT_ARGB8888, -}; - -/* * Returns the connectors currently associated with a CRTC. This function * should be called twice: once with a NULL connector list to retrieve * the list size, and once with the properly allocated list to be filled in. @@ -108,14 +96,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, } /** - * drm_plane_helper_check_update() - Check plane update for validity - * @plane: plane object to update - * @crtc: owning CRTC of owning plane - * @fb: framebuffer to flip onto plane - * @src: source coordinates in 16.16 fixed point - * @dest: integer destination coordinates + * drm_plane_helper_check_state() - Check plane state for validity + * @state: plane state to check * @clip: integer clipping coordinates - * @rotation: plane rotation * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point * @can_position: is it legal to position the plane such that it @@ -123,10 +106,9 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * only be false for primary planes. * @can_update_disabled: can the plane be updated while the crtc * is disabled? - * @visible: output parameter indicating whether plane is still visible after - * clipping * - * Checks that a desired plane update is valid. Drivers that provide + * Checks that a desired plane update is valid, and updates various + * bits of derived state (clipped coordinates etc.). Drivers that provide * their own plane handling rather than helper-provided implementations may * still wish to call this function to avoid duplication of error checking * code. @@ -134,29 +116,38 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, * RETURNS: * Zero if update appears valid, error code on failure */ -int drm_plane_helper_check_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_rect *src, - struct drm_rect *dest, - const struct drm_rect *clip, - unsigned int rotation, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled, - bool *visible) +int drm_plane_helper_check_state(struct drm_plane_state *state, + const struct drm_rect *clip, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled) { + struct drm_crtc *crtc = state->crtc; + struct drm_framebuffer *fb = state->fb; + struct drm_rect *src = &state->src; + struct drm_rect *dst = &state->dst; + unsigned int rotation = state->rotation; int hscale, vscale; + src->x1 = state->src_x; + src->y1 = state->src_y; + src->x2 = state->src_x + state->src_w; + src->y2 = state->src_y + state->src_h; + + dst->x1 = state->crtc_x; + dst->y1 = state->crtc_y; + dst->x2 = state->crtc_x + state->crtc_w; + dst->y2 = state->crtc_y + state->crtc_h; + if (!fb) { - *visible = false; + state->visible = false; return 0; } /* crtc should only be NULL when disabling (i.e., !fb) */ if (WARN_ON(!crtc)) { - *visible = false; + state->visible = false; return 0; } @@ -168,20 +159,20 @@ int drm_plane_helper_check_update(struct drm_plane *plane, drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation); /* Check scaling */ - hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale); - vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); + hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); + vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { 
DRM_DEBUG_KMS("Invalid scaling of plane\n"); - drm_rect_debug_print("src: ", src, true); - drm_rect_debug_print("dst: ", dest, false); + drm_rect_debug_print("src: ", &state->src, true); + drm_rect_debug_print("dst: ", &state->dst, false); return -ERANGE; } - *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); + state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); - if (!*visible) + if (!state->visible) /* * Plane isn't visible; some drivers can handle this * so we just return success here. Drivers that can't @@ -191,15 +182,87 @@ int drm_plane_helper_check_update(struct drm_plane *plane, */ return 0; - if (!can_position && !drm_rect_equals(dest, clip)) { + if (!can_position && !drm_rect_equals(dst, clip)) { DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); - drm_rect_debug_print("dst: ", dest, false); + drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", clip, false); return -EINVAL; } return 0; } +EXPORT_SYMBOL(drm_plane_helper_check_state); + +/** + * drm_plane_helper_check_update() - Check plane update for validity + * @plane: plane object to update + * @crtc: owning CRTC of owning plane + * @fb: framebuffer to flip onto plane + * @src: source coordinates in 16.16 fixed point + * @dst: integer destination coordinates + * @clip: integer clipping coordinates + * @rotation: plane rotation + * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point + * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point + * @can_position: is it legal to position the plane such that it + * doesn't cover the entire crtc? This will generally + * only be false for primary planes. + * @can_update_disabled: can the plane be updated while the crtc + * is disabled? + * @visible: output parameter indicating whether plane is still visible after + * clipping + * + * Checks that a desired plane update is valid. Drivers that provide + * their own plane handling rather than helper-provided implementations may + * still wish to call this function to avoid duplication of error checking + * code. 
+ * + * RETURNS: + * Zero if update appears valid, error code on failure + */ +int drm_plane_helper_check_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dst, + const struct drm_rect *clip, + unsigned int rotation, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible) +{ + struct drm_plane_state state = { + .plane = plane, + .crtc = crtc, + .fb = fb, + .src_x = src->x1, + .src_y = src->y1, + .src_w = drm_rect_width(src), + .src_h = drm_rect_height(src), + .crtc_x = dst->x1, + .crtc_y = dst->y1, + .crtc_w = drm_rect_width(dst), + .crtc_h = drm_rect_height(dst), + .rotation = rotation, + .visible = *visible, + }; + int ret; + + ret = drm_plane_helper_check_state(&state, clip, + min_scale, max_scale, + can_position, + can_update_disabled); + if (ret) + return ret; + + *src = state.src; + *dst = state.dst; + *visible = state.visible; + + return 0; +} EXPORT_SYMBOL(drm_plane_helper_check_update); /** @@ -274,7 +337,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, - BIT(DRM_ROTATE_0), + DRM_ROTATE_0, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, false, &visible); @@ -363,60 +426,6 @@ const struct drm_plane_funcs drm_primary_helper_funcs = { }; EXPORT_SYMBOL(drm_primary_helper_funcs); -static struct drm_plane *create_primary_plane(struct drm_device *dev) -{ - struct drm_plane *primary; - int ret; - - primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (primary == NULL) { - DRM_DEBUG_KMS("Failed to allocate primary plane\n"); - return NULL; - } - - /* - * Remove the format_default field from drm_plane when dropping - * this helper. - */ - primary->format_default = true; - - /* possible_crtc's will be filled in later by crtc_init */ - ret = drm_universal_plane_init(dev, primary, 0, - &drm_primary_helper_funcs, - safe_modeset_formats, - ARRAY_SIZE(safe_modeset_formats), - DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) { - kfree(primary); - primary = NULL; - } - - return primary; -} - -/** - * drm_crtc_init - Legacy CRTC initialization function - * @dev: DRM device - * @crtc: CRTC object to init - * @funcs: callbacks for the new CRTC - * - * Initialize a CRTC object with a default helper-provided primary plane and no - * cursor plane. - * - * Returns: - * Zero on success, error code on failure. 
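A sketch of how an atomic driver's plane ->atomic_check might call the new state-based helper, clipping against the CRTC mode; the foo_ callback and the no-scaling limits are illustrative assumptions:

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_rect clip = {};

	if (!state->crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	/* Clip against the new CRTC mode. */
	clip.x2 = crtc_state->adjusted_mode.hdisplay;
	clip.y2 = crtc_state->adjusted_mode.vdisplay;

	return drm_plane_helper_check_state(state, &clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true);
}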
- */ -int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, - const struct drm_crtc_funcs *funcs) -{ - struct drm_plane *primary; - - primary = create_primary_plane(dev); - return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs, - NULL); -} -EXPORT_SYMBOL(drm_crtc_init); - int drm_plane_helper_commit(struct drm_plane *plane, struct drm_plane_state *plane_state, struct drm_framebuffer *old_fb) diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a0df377d7d1c..f6b64d7d3528 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -129,6 +129,7 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; + unsigned long delay = DRM_OUTPUT_POLL_PERIOD; WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); @@ -141,8 +142,13 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) poll = true; } + if (dev->mode_config.delayed_event) { + poll = true; + delay = 0; + } + if (poll) - schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); + schedule_delayed_work(&dev->mode_config.output_poll_work, delay); } EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c new file mode 100644 index 000000000000..a4d81cf4ffa0 --- /dev/null +++ b/drivers/gpu/drm/drm_property.c @@ -0,0 +1,912 @@ +/* + * Copyright (c) 2016 Intel Corporation + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/export.h> +#include <drm/drmP.h> +#include <drm/drm_property.h> + +#include "drm_crtc_internal.h" + +/** + * DOC: overview + * + * Properties as represented by &drm_property are used to extend the modeset + * interface exposed to userspace. For the atomic modeset IOCTL properties are + * even the only way to transport metadata about the desired new modeset + * configuration from userspace to the kernel. Properties have a well-defined + * value range, which is enforced by the drm core. See the documentation of the + * flags member of struct &drm_property for an overview of the different + * property types and ranges. + * + * Properties don't store the current value directly, but need to be + * instatiated by attaching them to a &drm_mode_object with + * drm_object_attach_property(). + * + * Property values are only 64bit. 
To support bigger piles of data (like gamma + * tables, color correction matrizes or large structures) a property can instead + * point at a &drm_property_blob with that additional data + * + * Properties are defined by their symbolic name, userspace must keep a + * per-object mapping from those names to the property ID used in the atomic + * IOCTL and in the get/set property IOCTL. + */ + +static bool drm_property_type_valid(struct drm_property *property) +{ + if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) + return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE); + return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE); +} + +/** + * drm_property_create - create a new property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @num_values: number of pre-defined values + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ +struct drm_property *drm_property_create(struct drm_device *dev, int flags, + const char *name, int num_values) +{ + struct drm_property *property = NULL; + int ret; + + property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); + if (!property) + return NULL; + + property->dev = dev; + + if (num_values) { + property->values = kcalloc(num_values, sizeof(uint64_t), + GFP_KERNEL); + if (!property->values) + goto fail; + } + + ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); + if (ret) + goto fail; + + property->flags = flags; + property->num_values = num_values; + INIT_LIST_HEAD(&property->enum_list); + + if (name) { + strncpy(property->name, name, DRM_PROP_NAME_LEN); + property->name[DRM_PROP_NAME_LEN-1] = '\0'; + } + + list_add_tail(&property->head, &dev->mode_config.property_list); + + WARN_ON(!drm_property_type_valid(property)); + + return property; +fail: + kfree(property->values); + kfree(property); + return NULL; +} +EXPORT_SYMBOL(drm_property_create); + +/** + * drm_property_create_enum - create a new enumeration property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @props: enumeration lists with property values + * @num_values: number of pre-defined values + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Userspace is only allowed to set one of the predefined values for enumeration + * properties. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. 
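Purely for illustration (not part of this diff): a driver-private enum property could be created with the helper documented above and attached to a connector. The property name, the enum values and the init function are invented for the example.

static const struct drm_prop_enum_list example_dither_enum[] = {
	{ 0, "off" },
	{ 1, "on" },
	{ 2, "auto" },
};

static int example_attach_dither_prop(struct drm_device *dev,
				      struct drm_connector *connector)
{
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "example-dither",
					example_dither_enum,
					ARRAY_SIZE(example_dither_enum));
	if (!prop)
		return -ENOMEM;

	/* initial value 0 selects the "off" entry */
	drm_object_attach_property(&connector->base, prop, 0);
	return 0;
}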
+ */ +struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, + const char *name, + const struct drm_prop_enum_list *props, + int num_values) +{ + struct drm_property *property; + int i, ret; + + flags |= DRM_MODE_PROP_ENUM; + + property = drm_property_create(dev, flags, name, num_values); + if (!property) + return NULL; + + for (i = 0; i < num_values; i++) { + ret = drm_property_add_enum(property, i, + props[i].type, + props[i].name); + if (ret) { + drm_property_destroy(dev, property); + return NULL; + } + } + + return property; +} +EXPORT_SYMBOL(drm_property_create_enum); + +/** + * drm_property_create_bitmask - create a new bitmask property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @props: enumeration lists with property bitflags + * @num_props: size of the @props array + * @supported_bits: bitmask of all supported enumeration values + * + * This creates a new bitmask drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Compared to plain enumeration properties userspace is allowed to set any + * or'ed together combination of the predefined property bitflag values + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ +struct drm_property *drm_property_create_bitmask(struct drm_device *dev, + int flags, const char *name, + const struct drm_prop_enum_list *props, + int num_props, + uint64_t supported_bits) +{ + struct drm_property *property; + int i, ret, index = 0; + int num_values = hweight64(supported_bits); + + flags |= DRM_MODE_PROP_BITMASK; + + property = drm_property_create(dev, flags, name, num_values); + if (!property) + return NULL; + for (i = 0; i < num_props; i++) { + if (!(supported_bits & (1ULL << props[i].type))) + continue; + + if (WARN_ON(index >= num_values)) { + drm_property_destroy(dev, property); + return NULL; + } + + ret = drm_property_add_enum(property, index++, + props[i].type, + props[i].name); + if (ret) { + drm_property_destroy(dev, property); + return NULL; + } + } + + return property; +} +EXPORT_SYMBOL(drm_property_create_bitmask); + +static struct drm_property *property_create_range(struct drm_device *dev, + int flags, const char *name, + uint64_t min, uint64_t max) +{ + struct drm_property *property; + + property = drm_property_create(dev, flags, name, 2); + if (!property) + return NULL; + + property->values[0] = min; + property->values[1] = max; + + return property; +} + +/** + * drm_property_create_range - create a new unsigned ranged property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @min: minimum value of the property + * @max: maximum value of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Userspace is allowed to set any unsigned integer value in the (min, max) + * range inclusive. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. 
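Again purely illustrative, with the name and limits invented for the example: a range property created with the helper above and attached to a plane.

static int example_attach_brightness_prop(struct drm_device *dev,
					  struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_range(dev, 0, "example-brightness", 0, 255);
	if (!prop)
		return -ENOMEM;

	/* start fully bright; userspace may pick any value in 0..255 */
	drm_object_attach_property(&plane->base, prop, 255);
	return 0;
}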
+ */ +struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, + const char *name, + uint64_t min, uint64_t max) +{ + return property_create_range(dev, DRM_MODE_PROP_RANGE | flags, + name, min, max); +} +EXPORT_SYMBOL(drm_property_create_range); + +/** + * drm_property_create_signed_range - create a new signed ranged property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @min: minimum value of the property + * @max: maximum value of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Userspace is allowed to set any signed integer value in the (min, max) + * range inclusive. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ +struct drm_property *drm_property_create_signed_range(struct drm_device *dev, + int flags, const char *name, + int64_t min, int64_t max) +{ + return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags, + name, I642U64(min), I642U64(max)); +} +EXPORT_SYMBOL(drm_property_create_signed_range); + +/** + * drm_property_create_object - create a new object property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * @type: object type from DRM_MODE_OBJECT_* defines + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * Userspace is only allowed to set this to any property value of the given + * @type. Only useful for atomic properties, which is enforced. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. + */ +struct drm_property *drm_property_create_object(struct drm_device *dev, + int flags, const char *name, + uint32_t type) +{ + struct drm_property *property; + + flags |= DRM_MODE_PROP_OBJECT; + + if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC))) + return NULL; + + property = drm_property_create(dev, flags, name, 1); + if (!property) + return NULL; + + property->values[0] = type; + + return property; +} +EXPORT_SYMBOL(drm_property_create_object); + +/** + * drm_property_create_bool - create a new boolean property type + * @dev: drm device + * @flags: flags specifying the property type + * @name: name of the property + * + * This creates a new generic drm property which can then be attached to a drm + * object with drm_object_attach_property. The returned property object must be + * freed with drm_property_destroy(), which is done automatically when calling + * drm_mode_config_cleanup(). + * + * This is implemented as a ranged property with only {0, 1} as valid values. + * + * Returns: + * A pointer to the newly created property on success, NULL on failure. 
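A hypothetical sketch, not taken from this series: a boolean property is just a {0, 1} range, while an object property must carry DRM_MODE_PROP_ATOMIC as enforced by the helper above. All property names here are assumptions.

static int example_attach_misc_props(struct drm_device *dev,
				     struct drm_connector *connector)
{
	struct drm_property *enable, *crtc_id;

	enable = drm_property_create_bool(dev, 0, "example-enable");
	if (!enable)
		return -ENOMEM;
	drm_object_attach_property(&connector->base, enable, 1);

	/* object properties are only exposed to atomic userspace */
	crtc_id = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
					     "EXAMPLE_CRTC_ID",
					     DRM_MODE_OBJECT_CRTC);
	if (!crtc_id)
		return -ENOMEM;
	drm_object_attach_property(&connector->base, crtc_id, 0);

	return 0;
}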
+ */
+struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
+ const char *name)
+{
+ return drm_property_create_range(dev, flags, name, 0, 1);
+}
+EXPORT_SYMBOL(drm_property_create_bool);
+
+/**
+ * drm_property_add_enum - add a possible value to an enumeration property
+ * @property: enumeration property to change
+ * @index: index of the new enumeration
+ * @value: value of the new enumeration
+ * @name: symbolic name of the new enumeration
+ *
+ * This function adds enumerations to a property.
+ *
+ * Its use is deprecated; drivers should use one of the more specific helpers
+ * to directly create the property with all enumerations already attached.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_property_add_enum(struct drm_property *property, int index,
+ uint64_t value, const char *name)
+{
+ struct drm_property_enum *prop_enum;
+
+ if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
+ drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
+ return -EINVAL;
+
+ /*
+ * Bitmask enum properties have the additional constraint of values
+ * from 0 to 63
+ */
+ if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
+ (value > 63))
+ return -EINVAL;
+
+ if (!list_empty(&property->enum_list)) {
+ list_for_each_entry(prop_enum, &property->enum_list, head) {
+ if (prop_enum->value == value) {
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ return 0;
+ }
+ }
+ }
+
+ prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
+ if (!prop_enum)
+ return -ENOMEM;
+
+ strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
+ prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ prop_enum->value = value;
+
+ property->values[index] = value;
+ list_add_tail(&prop_enum->head, &property->enum_list);
+ return 0;
+}
+EXPORT_SYMBOL(drm_property_add_enum);
+
+/**
+ * drm_property_destroy - destroy a drm property
+ * @dev: drm device
+ * @property: property to destroy
+ *
+ * This function frees a property including any attached resources like
+ * enumeration values.
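Since drm_property_add_enum() is deprecated for direct use, a driver would normally go through the bitmask constructor shown earlier; a minimal sketch with invented bit names (each .type entry is a bit index from 0 to 63, matching the constraint above).

static const struct drm_prop_enum_list example_feature_bits[] = {
	{ 0, "feature-a" },	/* bit 0 */
	{ 1, "feature-b" },	/* bit 1 */
	{ 2, "feature-c" },	/* bit 2 */
};

static int example_attach_feature_prop(struct drm_device *dev,
				       struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_property_create_bitmask(dev, 0, "example-features",
					   example_feature_bits,
					   ARRAY_SIZE(example_feature_bits),
					   BIT(0) | BIT(1) | BIT(2));
	if (!prop)
		return -ENOMEM;

	/* default: only "feature-a" enabled */
	drm_object_attach_property(&plane->base, prop, BIT(0));
	return 0;
}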
+ */ +void drm_property_destroy(struct drm_device *dev, struct drm_property *property) +{ + struct drm_property_enum *prop_enum, *pt; + + list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) { + list_del(&prop_enum->head); + kfree(prop_enum); + } + + if (property->num_values) + kfree(property->values); + drm_mode_object_unregister(dev, &property->base); + list_del(&property->head); + kfree(property); +} +EXPORT_SYMBOL(drm_property_destroy); + +int drm_mode_getproperty_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_get_property *out_resp = data; + struct drm_property *property; + int enum_count = 0; + int value_count = 0; + int ret = 0, i; + int copied; + struct drm_property_enum *prop_enum; + struct drm_mode_property_enum __user *enum_ptr; + uint64_t __user *values_ptr; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + drm_modeset_lock_all(dev); + property = drm_property_find(dev, out_resp->prop_id); + if (!property) { + ret = -ENOENT; + goto done; + } + + if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || + drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { + list_for_each_entry(prop_enum, &property->enum_list, head) + enum_count++; + } + + value_count = property->num_values; + + strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); + out_resp->name[DRM_PROP_NAME_LEN-1] = 0; + out_resp->flags = property->flags; + + if ((out_resp->count_values >= value_count) && value_count) { + values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr; + for (i = 0; i < value_count; i++) { + if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) { + ret = -EFAULT; + goto done; + } + } + } + out_resp->count_values = value_count; + + if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || + drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { + if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { + copied = 0; + enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; + list_for_each_entry(prop_enum, &property->enum_list, head) { + + if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { + ret = -EFAULT; + goto done; + } + + if (copy_to_user(&enum_ptr[copied].name, + &prop_enum->name, DRM_PROP_NAME_LEN)) { + ret = -EFAULT; + goto done; + } + copied++; + } + } + out_resp->count_enum_blobs = enum_count; + } + + /* + * NOTE: The idea seems to have been to use this to read all the blob + * property values. But nothing ever added them to the corresponding + * list, userspace always used the special-purpose get_blob ioctl to + * read the value for a blob property. It also doesn't make a lot of + * sense to return values here when everything else is just metadata for + * the property itself. 
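To make the note above concrete: userspace enumerates an object's properties and then fetches blob payloads through the dedicated getblob ioctl. A hedged libdrm sketch, with fd and conn_id assumed and error handling kept minimal.

#include <xf86drmMode.h>

static void example_dump_connector_blobs(int fd, uint32_t conn_id)
{
	drmModeObjectPropertiesPtr props;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, conn_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && (prop->flags & DRM_MODE_PROP_BLOB)) {
			drmModePropertyBlobPtr blob =
				drmModeGetPropertyBlob(fd, props->prop_values[i]);

			/* blob->data / blob->length hold the payload */
			drmModeFreePropertyBlob(blob);
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}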
+ */ + if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) + out_resp->count_enum_blobs = 0; +done: + drm_modeset_unlock_all(dev); + return ret; +} + +static void drm_property_free_blob(struct kref *kref) +{ + struct drm_property_blob *blob = + container_of(kref, struct drm_property_blob, base.refcount); + + mutex_lock(&blob->dev->mode_config.blob_lock); + list_del(&blob->head_global); + mutex_unlock(&blob->dev->mode_config.blob_lock); + + drm_mode_object_unregister(blob->dev, &blob->base); + + kfree(blob); +} + +/** + * drm_property_create_blob - Create new blob property + * @dev: DRM device to create property for + * @length: Length to allocate for blob data + * @data: If specified, copies data into blob + * + * Creates a new blob property for a specified DRM device, optionally + * copying data. Note that blob properties are meant to be invariant, hence the + * data must be filled out before the blob is used as the value of any property. + * + * Returns: + * New blob property with a single reference on success, or an ERR_PTR + * value on failure. + */ +struct drm_property_blob * +drm_property_create_blob(struct drm_device *dev, size_t length, + const void *data) +{ + struct drm_property_blob *blob; + int ret; + + if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob)) + return ERR_PTR(-EINVAL); + + blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL); + if (!blob) + return ERR_PTR(-ENOMEM); + + /* This must be explicitly initialised, so we can safely call list_del + * on it in the removal handler, even if it isn't in a file list. */ + INIT_LIST_HEAD(&blob->head_file); + blob->length = length; + blob->dev = dev; + + if (data) + memcpy(blob->data, data, length); + + ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB, + true, drm_property_free_blob); + if (ret) { + kfree(blob); + return ERR_PTR(-EINVAL); + } + + mutex_lock(&dev->mode_config.blob_lock); + list_add_tail(&blob->head_global, + &dev->mode_config.property_blob_list); + mutex_unlock(&dev->mode_config.blob_lock); + + return blob; +} +EXPORT_SYMBOL(drm_property_create_blob); + +/** + * drm_property_unreference_blob - Unreference a blob property + * @blob: Pointer to blob property + * + * Drop a reference on a blob property. May free the object. + */ +void drm_property_unreference_blob(struct drm_property_blob *blob) +{ + if (!blob) + return; + + drm_mode_object_unreference(&blob->base); +} +EXPORT_SYMBOL(drm_property_unreference_blob); + +void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv) +{ + struct drm_property_blob *blob, *bt; + + /* + * When the file gets released that means no one else can access the + * blob list any more, so no need to grab dev->blob_lock. + */ + list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) { + list_del_init(&blob->head_file); + drm_property_unreference_blob(blob); + } +} + +/** + * drm_property_reference_blob - Take a reference on an existing property + * @blob: Pointer to blob property + * + * Take a new reference on an existing blob property. Returns @blob, which + * allows this to be used as a shorthand in assignments. 
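An illustrative driver-side pattern, not part of this diff: create a blob, publish its ID through an already-created blob property, and keep the driver's reference until teardown. The property, payload and storage slot are all parameters invented for the sketch.

static int example_publish_blob(struct drm_device *dev,
				struct drm_connector *connector,
				struct drm_property *example_blob_prop,
				const void *payload, size_t len,
				struct drm_property_blob **slot)
{
	struct drm_property_blob *blob;

	blob = drm_property_create_blob(dev, len, payload);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	drm_object_property_set_value(&connector->base, example_blob_prop,
				      blob->base.id);

	/* keep the creation reference in *slot; drop it at teardown with
	 * drm_property_unreference_blob() */
	*slot = blob;
	return 0;
}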
+ */ +struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob) +{ + drm_mode_object_reference(&blob->base); + return blob; +} +EXPORT_SYMBOL(drm_property_reference_blob); + +/** + * drm_property_lookup_blob - look up a blob property and take a reference + * @dev: drm device + * @id: id of the blob property + * + * If successful, this takes an additional reference to the blob property. + * callers need to make sure to eventually unreference the returned property + * again, using @drm_property_unreference_blob. + * + * Return: + * NULL on failure, pointer to the blob on success. + */ +struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, + uint32_t id) +{ + struct drm_mode_object *obj; + struct drm_property_blob *blob = NULL; + + obj = __drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB); + if (obj) + blob = obj_to_blob(obj); + return blob; +} +EXPORT_SYMBOL(drm_property_lookup_blob); + +/** + * drm_property_replace_global_blob - replace existing blob property + * @dev: drm device + * @replace: location of blob property pointer to be replaced + * @length: length of data for new blob, or 0 for no data + * @data: content for new blob, or NULL for no data + * @obj_holds_id: optional object for property holding blob ID + * @prop_holds_id: optional property holding blob ID + * @return 0 on success or error on failure + * + * This function will replace a global property in the blob list, optionally + * updating a property which holds the ID of that property. + * + * If length is 0 or data is NULL, no new blob will be created, and the holding + * property, if specified, will be set to 0. + * + * Access to the replace pointer is assumed to be protected by the caller, e.g. + * by holding the relevant modesetting object lock for its parent. + * + * For example, a drm_connector has a 'PATH' property, which contains the ID + * of a blob property with the value of the MST path information. Calling this + * function with replace pointing to the connector's path_blob_ptr, length and + * data set for the new path information, obj_holds_id set to the connector's + * base object, and prop_holds_id set to the path property name, will perform + * a completely atomic update. The access to path_blob_ptr is protected by the + * caller holding a lock on the connector. + */ +int drm_property_replace_global_blob(struct drm_device *dev, + struct drm_property_blob **replace, + size_t length, + const void *data, + struct drm_mode_object *obj_holds_id, + struct drm_property *prop_holds_id) +{ + struct drm_property_blob *new_blob = NULL; + struct drm_property_blob *old_blob = NULL; + int ret; + + WARN_ON(replace == NULL); + + old_blob = *replace; + + if (length && data) { + new_blob = drm_property_create_blob(dev, length, data); + if (IS_ERR(new_blob)) + return PTR_ERR(new_blob); + } + + if (obj_holds_id) { + ret = drm_object_property_set_value(obj_holds_id, + prop_holds_id, + new_blob ? 
+ new_blob->base.id : 0); + if (ret != 0) + goto err_created; + } + + drm_property_unreference_blob(old_blob); + *replace = new_blob; + + return 0; + +err_created: + drm_property_unreference_blob(new_blob); + return ret; +} +EXPORT_SYMBOL(drm_property_replace_global_blob); + +int drm_mode_getblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_get_blob *out_resp = data; + struct drm_property_blob *blob; + int ret = 0; + void __user *blob_ptr; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + blob = drm_property_lookup_blob(dev, out_resp->blob_id); + if (!blob) + return -ENOENT; + + if (out_resp->length == blob->length) { + blob_ptr = (void __user *)(unsigned long)out_resp->data; + if (copy_to_user(blob_ptr, blob->data, blob->length)) { + ret = -EFAULT; + goto unref; + } + } + out_resp->length = blob->length; +unref: + drm_property_unreference_blob(blob); + + return ret; +} + +int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_create_blob *out_resp = data; + struct drm_property_blob *blob; + void __user *blob_ptr; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + blob = drm_property_create_blob(dev, out_resp->length, NULL); + if (IS_ERR(blob)) + return PTR_ERR(blob); + + blob_ptr = (void __user *)(unsigned long)out_resp->data; + if (copy_from_user(blob->data, blob_ptr, out_resp->length)) { + ret = -EFAULT; + goto out_blob; + } + + /* Dropping the lock between create_blob and our access here is safe + * as only the same file_priv can remove the blob; at this point, it is + * not associated with any file_priv. */ + mutex_lock(&dev->mode_config.blob_lock); + out_resp->blob_id = blob->base.id; + list_add_tail(&blob->head_file, &file_priv->blobs); + mutex_unlock(&dev->mode_config.blob_lock); + + return 0; + +out_blob: + drm_property_unreference_blob(blob); + return ret; +} + +int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_mode_destroy_blob *out_resp = data; + struct drm_property_blob *blob = NULL, *bt; + bool found = false; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + + blob = drm_property_lookup_blob(dev, out_resp->blob_id); + if (!blob) + return -ENOENT; + + mutex_lock(&dev->mode_config.blob_lock); + /* Ensure the property was actually created by this user. */ + list_for_each_entry(bt, &file_priv->blobs, head_file) { + if (bt == blob) { + found = true; + break; + } + } + + if (!found) { + ret = -EPERM; + goto err; + } + + /* We must drop head_file here, because we may not be the last + * reference on the blob. */ + list_del_init(&blob->head_file); + mutex_unlock(&dev->mode_config.blob_lock); + + /* One reference from lookup, and one from the filp. */ + drm_property_unreference_blob(blob); + drm_property_unreference_blob(blob); + + return 0; + +err: + mutex_unlock(&dev->mode_config.blob_lock); + drm_property_unreference_blob(blob); + + return ret; +} + +/* Some properties could refer to dynamic refcnt'd objects, or things that + * need special locking to handle lifetime issues (ie. to ensure the prop + * value doesn't become invalid part way through the property update due to + * race). The value returned by reference via 'obj' should be passed back + * to drm_property_change_valid_put() after the property is set (and the + * object to which the property is attached has a chance to take it's own + * reference). 
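Illustrating the pairing described above (the caller, the object and the way the value is actually applied are assumptions of the sketch):

static int example_set_prop(struct drm_mode_object *obj,
			    struct drm_property *prop, uint64_t value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, value, &ref))
		return -EINVAL;

	/* apply the value while any referenced object is guaranteed alive */
	ret = drm_object_property_set_value(obj, prop, value);

	drm_property_change_valid_put(prop, ref);
	return ret;
}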
+ */ +bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, struct drm_mode_object **ref) +{ + int i; + + if (property->flags & DRM_MODE_PROP_IMMUTABLE) + return false; + + *ref = NULL; + + if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) { + if (value < property->values[0] || value > property->values[1]) + return false; + return true; + } else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) { + int64_t svalue = U642I64(value); + + if (svalue < U642I64(property->values[0]) || + svalue > U642I64(property->values[1])) + return false; + return true; + } else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { + uint64_t valid_mask = 0; + + for (i = 0; i < property->num_values; i++) + valid_mask |= (1ULL << property->values[i]); + return !(value & ~valid_mask); + } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) { + struct drm_property_blob *blob; + + if (value == 0) + return true; + + blob = drm_property_lookup_blob(property->dev, value); + if (blob) { + *ref = &blob->base; + return true; + } else { + return false; + } + } else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) { + /* a zero value for an object property translates to null: */ + if (value == 0) + return true; + + *ref = __drm_mode_object_find(property->dev, value, + property->values[0]); + return *ref != NULL; + } + + for (i = 0; i < property->num_values; i++) + if (property->values[i] == value) + return true; + return false; +} + +void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref) +{ + if (!ref) + return; + + if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) { + drm_mode_object_unreference(ref); + } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) + drm_property_unreference_blob(obj_to_blob(ref)); +} diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index a8e2c8603945..73e53a8d1b37 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -100,7 +100,7 @@ static int drm_calc_scale(int src, int dst) { int scale = 0; - if (src < 0 || dst < 0) + if (WARN_ON(src < 0 || dst < 0)) return -EINVAL; if (dst == 0) @@ -317,38 +317,38 @@ void drm_rect_rotate(struct drm_rect *r, { struct drm_rect tmp; - if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) { + if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) { tmp = *r; - if (rotation & BIT(DRM_REFLECT_X)) { + if (rotation & DRM_REFLECT_X) { r->x1 = width - tmp.x2; r->x2 = width - tmp.x1; } - if (rotation & BIT(DRM_REFLECT_Y)) { + if (rotation & DRM_REFLECT_Y) { r->y1 = height - tmp.y2; r->y2 = height - tmp.y1; } } switch (rotation & DRM_ROTATE_MASK) { - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: break; - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: tmp = *r; r->x1 = tmp.y1; r->x2 = tmp.y2; r->y1 = width - tmp.x2; r->y2 = width - tmp.x1; break; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: tmp = *r; r->x1 = width - tmp.x2; r->x2 = width - tmp.x1; r->y1 = height - tmp.y2; r->y2 = height - tmp.y1; break; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: tmp = *r; r->x1 = height - tmp.y2; r->x2 = height - tmp.y1; @@ -392,23 +392,23 @@ void drm_rect_rotate_inv(struct drm_rect *r, struct drm_rect tmp; switch (rotation & DRM_ROTATE_MASK) { - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: break; - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: tmp = *r; r->x1 = width - tmp.y2; r->x2 = width - tmp.y1; r->y1 = tmp.x1; r->y2 = tmp.x2; break; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: tmp = *r; 
r->x1 = width - tmp.x2; r->x2 = width - tmp.x1; r->y1 = height - tmp.y2; r->y2 = height - tmp.y1; break; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: tmp = *r; r->x1 = tmp.y1; r->x2 = tmp.y2; @@ -419,15 +419,15 @@ void drm_rect_rotate_inv(struct drm_rect *r, break; } - if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) { + if (rotation & (DRM_REFLECT_X | DRM_REFLECT_Y)) { tmp = *r; - if (rotation & BIT(DRM_REFLECT_X)) { + if (rotation & DRM_REFLECT_X) { r->x1 = width - tmp.x2; r->x2 = width - tmp.x1; } - if (rotation & BIT(DRM_REFLECT_Y)) { + if (rotation & DRM_REFLECT_Y) { r->y1 = height - tmp.y2; r->y2 = height - tmp.y1; } diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index bf70431073f6..275bca44f38c 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -68,7 +68,7 @@ static void drm_sg_cleanup(struct drm_sg_mem * entry) void drm_legacy_sg_cleanup(struct drm_device *dev) { if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && - !drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_core_check_feature(dev, DRIVER_LEGACY)) { drm_sg_cleanup(dev->sg); dev->sg = NULL; } @@ -88,7 +88,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data, DRM_DEBUG("\n"); - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_SG)) @@ -201,7 +201,7 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data, struct drm_scatter_gather *request = data; struct drm_sg_mem *entry; - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_SG)) diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 0db36d27e90b..7b6d26e64977 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -34,6 +34,12 @@ static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { .destroy = drm_encoder_cleanup, }; +static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + return drm_atomic_add_affected_planes(state->state, crtc); +} + static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc) { struct drm_simple_display_pipe *pipe; @@ -57,6 +63,7 @@ static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { + .atomic_check = drm_simple_kms_crtc_check, .disable = drm_simple_kms_crtc_disable, .enable = drm_simple_kms_crtc_enable, }; @@ -73,22 +80,9 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *plane_state) { - struct drm_rect src = { - .x1 = plane_state->src_x, - .y1 = plane_state->src_y, - .x2 = plane_state->src_x + plane_state->src_w, - .y2 = plane_state->src_y + plane_state->src_h, - }; - struct drm_rect dest = { - .x1 = plane_state->crtc_x, - .y1 = plane_state->crtc_y, - .x2 = plane_state->crtc_x + plane_state->crtc_w, - .y2 = plane_state->crtc_y + plane_state->crtc_h, - }; struct drm_rect clip = { 0 }; struct drm_simple_display_pipe *pipe; struct drm_crtc_state *crtc_state; - bool visible; int ret; pipe = container_of(plane, struct drm_simple_display_pipe, plane); @@ -102,17 +96,15 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, clip.x2 = crtc_state->adjusted_mode.hdisplay; clip.y2 = 
crtc_state->adjusted_mode.vdisplay; - ret = drm_plane_helper_check_update(plane, &pipe->crtc, - plane_state->fb, - &src, &dest, &clip, - plane_state->rotation, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - false, true, &visible); + + ret = drm_plane_helper_check_state(plane_state, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true); if (ret) return ret; - if (!visible) + if (!plane_state->visible) return -EINVAL; if (!pipe->funcs || !pipe->funcs->check) @@ -148,16 +140,61 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { }; /** + * drm_simple_display_pipe_attach_bridge - Attach a bridge to the display pipe + * @pipe: simple display pipe object + * @bridge: bridge to attach + * + * Makes it possible to still use the drm_simple_display_pipe helpers when + * a DRM bridge has to be used. + * + * Note that you probably want to initialize the pipe by passing a NULL + * connector to drm_simple_display_pipe_init(). + * + * Returns: + * Zero on success, negative error code on failure. + */ +int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, + struct drm_bridge *bridge) +{ + bridge->encoder = &pipe->encoder; + pipe->encoder.bridge = bridge; + return drm_bridge_attach(pipe->encoder.dev, bridge); +} +EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge); + +/** + * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe + * @pipe: simple display pipe object + * + * Detaches the drm bridge previously attached with + * drm_simple_display_pipe_attach_bridge() + */ +void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe) +{ + if (WARN_ON(!pipe->encoder.bridge)) + return; + + drm_bridge_detach(pipe->encoder.bridge); + pipe->encoder.bridge = NULL; +} +EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge); + +/** * drm_simple_display_pipe_init - Initialize a simple display pipeline * @dev: DRM device * @pipe: simple display pipe object to initialize * @funcs: callbacks for the display pipe (optional) - * @formats: array of supported formats (%DRM_FORMAT_*) + * @formats: array of supported formats (DRM_FORMAT\_\*) * @format_count: number of elements in @formats - * @connector: connector to attach and register + * @connector: connector to attach and register (optional) * * Sets up a display pipeline which consist of a really simple - * plane-crtc-encoder pipe coupled with the provided connector. + * plane-crtc-encoder pipe. + * + * If a connector is supplied, the pipe will be coupled with the provided + * connector. You may supply a NULL connector when using drm bridges, that + * handle connectors themselves (see drm_simple_display_pipe_attach_bridge()). + * * Teardown of a simple display pipe is all handled automatically by the drm * core through calling drm_mode_config_cleanup(). Drivers afterwards need to * release the memory for the structure themselves. 
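For illustration only (not part of this series): the NULL-connector flow described above pairs drm_simple_display_pipe_init() with drm_simple_display_pipe_attach_bridge(). The pipe funcs, format list, priv structure and bridge pointer are assumptions of the example.

static int example_pipe_with_bridge(struct drm_device *drm,
				    struct example_priv *priv,
				    struct drm_bridge *bridge)
{
	int ret;

	ret = drm_simple_display_pipe_init(drm, &priv->pipe,
					   &example_pipe_funcs,
					   example_formats,
					   ARRAY_SIZE(example_formats),
					   NULL /* connector handled by bridge */);
	if (ret)
		return ret;

	return drm_simple_display_pipe_attach_bridge(&priv->pipe, bridge);
}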
@@ -196,7 +233,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev, encoder->possible_crtcs = 1 << drm_crtc_index(crtc); ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); - if (ret) + if (ret || !connector) return ret; return drm_mode_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index f306c8855978..0aef432679f9 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -86,7 +86,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, unsigned long page_offset, unsigned long size) { rwlock_init(&mgr->vm_lock); - mgr->vm_addr_space_rb = RB_ROOT; drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size); } EXPORT_SYMBOL(drm_vma_offset_manager_init); @@ -145,16 +144,16 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m unsigned long start, unsigned long pages) { - struct drm_vma_offset_node *node, *best; + struct drm_mm_node *node, *best; struct rb_node *iter; unsigned long offset; - iter = mgr->vm_addr_space_rb.rb_node; + iter = mgr->vm_addr_space_mm.interval_tree.rb_node; best = NULL; while (likely(iter)) { - node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); - offset = node->vm_node.start; + node = rb_entry(iter, struct drm_mm_node, rb); + offset = node->start; if (start >= offset) { iter = iter->rb_right; best = node; @@ -167,38 +166,17 @@ struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_m /* verify that the node spans the requested area */ if (best) { - offset = best->vm_node.start + best->vm_node.size; + offset = best->start + best->size; if (offset < start + pages) best = NULL; } - return best; -} -EXPORT_SYMBOL(drm_vma_offset_lookup_locked); - -/* internal helper to link @node into the rb-tree */ -static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr, - struct drm_vma_offset_node *node) -{ - struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node; - struct rb_node *parent = NULL; - struct drm_vma_offset_node *iter_node; - - while (likely(*iter)) { - parent = *iter; - iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); + if (!best) + return NULL; - if (node->vm_node.start < iter_node->vm_node.start) - iter = &(*iter)->rb_left; - else if (node->vm_node.start > iter_node->vm_node.start) - iter = &(*iter)->rb_right; - else - BUG(); - } - - rb_link_node(&node->vm_rb, parent, iter); - rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb); + return container_of(best, struct drm_vma_offset_node, vm_node); } +EXPORT_SYMBOL(drm_vma_offset_lookup_locked); /** * drm_vma_offset_add() - Add offset node to manager @@ -240,8 +218,6 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, if (ret) goto out_unlock; - _drm_vma_offset_add_rb(mgr, node); - out_unlock: write_unlock(&mgr->vm_lock); return ret; @@ -265,7 +241,6 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, write_lock(&mgr->vm_lock); if (drm_mm_node_allocated(&node->vm_node)) { - rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb); drm_mm_remove_node(&node->vm_node); memset(&node->vm_node, 0, sizeof(node->vm_node)); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index ffd1b32caa8d..e3164d90399d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -488,8 +488,7 @@ static const struct file_operations fops = { }; static struct drm_driver etnaviv_drm_driver = 
{ - .driver_features = DRIVER_HAVE_IRQ | - DRIVER_GEM | + .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, .open = etnaviv_open, @@ -533,8 +532,6 @@ static int etnaviv_bind(struct device *dev) if (!drm) return -ENOMEM; - drm->platformdev = to_platform_device(dev); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev, "failed to allocate private data\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 877d2efa28e2..486943e70f70 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -105,7 +105,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) atomic_inc(&exynos_crtc->pending_update); } - drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_planes(dev, state, 0); exynos_atomic_wait_for_commit(state); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index e50467a0deb0..a7e5486bd1e9 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -169,25 +169,10 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, return; } -static void -fsl_dcu_drm_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) -{ -} - -static int -fsl_dcu_drm_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) -{ - return 0; -} - static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = { .atomic_check = fsl_dcu_drm_plane_atomic_check, .atomic_disable = fsl_dcu_drm_plane_atomic_disable, .atomic_update = fsl_dcu_drm_plane_atomic_update, - .cleanup_fb = fsl_dcu_drm_plane_cleanup_fb, - .prepare_fb = fsl_dcu_drm_plane_prepare_fb, }; static void fsl_dcu_drm_plane_destroy(struct drm_plane *plane) diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c index db9f7d011832..0d2bb1682508 100644 --- a/drivers/gpu/drm/gma500/accel_2d.c +++ b/drivers/gpu/drm/gma500/accel_2d.c @@ -28,7 +28,6 @@ #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/console.h> diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 38dc89083148..ea733ab5b1e0 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -415,14 +415,6 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) if (ret) return ret; - /* Didn't get an EDID, so - * Set wide sync ranges so we get all modes - * handed to valid_mode for checking - */ - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 0fcdce0817de..3a44e705db53 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -26,7 +26,6 @@ #include <linux/tty.h> #include <linux/slab.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/console.h> diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 907cb51795c3..acb3848ef1c9 100644 --- 
a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -335,11 +335,6 @@ static int mdfld_dsi_connector_get_modes(struct drm_connector *connector) struct drm_display_mode *dup_mode = NULL; struct drm_device *dev = connector->dev; - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; - if (fixed_mode) { dev_dbg(dev->dev, "fixed_mode %dx%d\n", fixed_mode->hdisplay, fixed_mode->vdisplay); diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c index ab696ca7eeec..eab6d889bde9 100644 --- a/drivers/gpu/drm/gma500/opregion.c +++ b/drivers/gpu/drm/gma500/opregion.c @@ -163,10 +163,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) if (bclp > 255) return ASLE_BACKLIGHT_FAILED; - if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) { - int max = bd->props.max_brightness; - gma_backlight_set(dev, bclp * max / 255); - } + gma_backlight_set(dev, bclp * bd->props.max_brightness / 255); asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index e55733ca46d2..fd7c91254841 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -530,15 +530,6 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector) if (ret) return ret; - /* Didn't get an EDID, so - * Set wide sync ranges so we get all modes - * handed to valid_mode for checking - */ - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; - if (mode_dev->panel_fixed_mode != NULL) { struct drm_display_mode *mode = drm_mode_duplicate(dev, mode_dev->panel_fixed_mode); diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c index 4fca0d6feebe..e5360726d80b 100644 --- a/drivers/gpu/drm/gma500/psb_intel_modes.c +++ b/drivers/gpu/drm/gma500/psb_intel_modes.c @@ -18,7 +18,6 @@ */ #include <linux/i2c.h> -#include <linux/fb.h> #include <drm/drmP.h> #include "psb_intel_drv.h" diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index c3707d47cd89..7e7a4d43d6b6 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -608,15 +608,17 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb, u32 ch, u32 y, u32 in_h, u32 fmt) { struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0); + char *format_name; u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en; u32 stride = fb->pitches[0]; u32 addr = (u32)obj->paddr + y * stride; DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n", ch + 1, y, in_h, stride, (u32)obj->paddr); + format_name = drm_get_format_name(fb->pixel_format); DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n", - addr, fb->width, fb->height, fmt, - drm_get_format_name(fb->pixel_format)); + addr, fb->width, fb->height, fmt, format_name); + kfree(format_name); /* get reg offset */ reg_ctrl = RD_CH_CTRL(ch); @@ -815,19 +817,6 @@ static void ade_disable_channel(struct ade_plane *aplane) ade_compositor_routing_disable(base, ch); } -static int ade_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) -{ - /* do nothing */ - return 0; -} - -static void 
ade_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) -{ - /* do nothing */ -} - static int ade_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { @@ -895,8 +884,6 @@ static void ade_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs ade_plane_helper_funcs = { - .prepare_fb = ade_plane_prepare_fb, - .cleanup_fb = ade_plane_cleanup_fb, .atomic_check = ade_plane_atomic_check, .atomic_update = ade_plane_atomic_update, .atomic_disable = ade_plane_atomic_disable, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 1edd9bc80294..1fc2f502d20d 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -169,7 +169,7 @@ static int kirin_gem_cma_dumb_create(struct drm_file *file, static struct drm_driver kirin_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | - DRIVER_ATOMIC | DRIVER_HAVE_IRQ, + DRIVER_ATOMIC, .fops = &kirin_drm_fops, .gem_free_object_unlocked = drm_gem_cma_free_object, diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index 44f4a131c8dd..0be55dc1ef4b 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -56,9 +56,7 @@ static const struct file_operations i810_driver_fops = { }; static struct drm_driver driver = { - .driver_features = - DRIVER_USE_AGP | - DRIVER_HAVE_DMA, + .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY, .dev_priv_size = sizeof(drm_i810_buf_priv_t), .load = i810_driver_load, .lastclose = i810_driver_lastclose, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 684fc1cd08fa..a7da24640e88 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -3,12 +3,16 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror +subdir-ccflags-y += \ + $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) # Please keep these build lists sorted! # core driver code i915-y := i915_drv.o \ i915_irq.o \ + i915_memcpy.o \ + i915_mm.o \ i915_params.o \ i915_pci.o \ i915_suspend.o \ @@ -25,7 +29,6 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o i915-y += i915_cmd_parser.o \ i915_gem_batch_pool.o \ i915_gem_context.o \ - i915_gem_debug.o \ i915_gem_dmabuf.o \ i915_gem_evict.o \ i915_gem_execbuffer.o \ @@ -33,6 +36,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_gtt.o \ i915_gem.o \ i915_gem_render_state.o \ + i915_gem_request.o \ i915_gem_shrinker.o \ i915_gem_stolen.o \ i915_gem_tiling.o \ @@ -40,6 +44,7 @@ i915-y += i915_cmd_parser.o \ i915_gpu_error.o \ i915_trace_points.o \ intel_breadcrumbs.o \ + intel_engine_cs.o \ intel_lrc.o \ intel_mocs.o \ intel_ringbuffer.o \ @@ -109,6 +114,6 @@ i915-y += intel_gvt.o include $(src)/gvt/Makefile endif -obj-$(CONFIG_DRM_I915) += i915.o +obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index b0fd6a7b0603..3c72b3b103e7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -62,23 +62,23 @@ * The parser always rejects such commands. * * The majority of the problematic commands fall in the MI_* range, with only a - * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW). + * few specific commands on each engine (e.g. 
PIPE_CONTROL and MI_FLUSH_DW). * * Implementation: - * Each ring maintains tables of commands and registers which the parser uses in - * scanning batch buffers submitted to that ring. + * Each engine maintains tables of commands and registers which the parser + * uses in scanning batch buffers submitted to that engine. * * Since the set of commands that the parser must check for is significantly * smaller than the number of commands supported, the parser tables contain only * those commands required by the parser. This generally works because command * opcode ranges have standard command length encodings. So for commands that * the parser does not need to check, it can easily skip them. This is - * implemented via a per-ring length decoding vfunc. + * implemented via a per-engine length decoding vfunc. * * Unfortunately, there are a number of commands that do not follow the standard * length encoding for their opcode range, primarily amongst the MI_* commands. * To handle this, the parser provides a way to define explicit "skip" entries - * in the per-ring command tables. + * in the per-engine command tables. * * Other command table entries map fairly directly to high level categories * mentioned above: rejected, master-only, register whitelist. The parser @@ -86,24 +86,25 @@ * general bitmasking mechanism. */ -#define STD_MI_OPCODE_MASK 0xFF800000 -#define STD_3D_OPCODE_MASK 0xFFFF0000 -#define STD_2D_OPCODE_MASK 0xFFC00000 -#define STD_MFX_OPCODE_MASK 0xFFFF0000 +#define STD_MI_OPCODE_SHIFT (32 - 9) +#define STD_3D_OPCODE_SHIFT (32 - 16) +#define STD_2D_OPCODE_SHIFT (32 - 10) +#define STD_MFX_OPCODE_SHIFT (32 - 16) +#define MIN_OPCODE_SHIFT 16 #define CMD(op, opm, f, lm, fl, ...) \ { \ .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0), \ - .cmd = { (op), (opm) }, \ + .cmd = { (op), ~0u << (opm) }, \ .length = { (lm) }, \ __VA_ARGS__ \ } /* Convenience macros to compress the tables */ -#define SMI STD_MI_OPCODE_MASK -#define S3D STD_3D_OPCODE_MASK -#define S2D STD_2D_OPCODE_MASK -#define SMFX STD_MFX_OPCODE_MASK +#define SMI STD_MI_OPCODE_SHIFT +#define S3D STD_3D_OPCODE_SHIFT +#define S2D STD_2D_OPCODE_SHIFT +#define SMFX STD_MFX_OPCODE_SHIFT #define F true #define S CMD_DESC_SKIP #define R CMD_DESC_REJECT @@ -350,6 +351,9 @@ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = { CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), }; +static const struct drm_i915_cmd_descriptor noop_desc = + CMD(MI_NOOP, SMI, F, 1, S); + #undef CMD #undef SMI #undef S3D @@ -458,6 +462,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_GPGPU_DISPATCHDIMX), REG32(GEN7_GPGPU_DISPATCHDIMY), REG32(GEN7_GPGPU_DISPATCHDIMZ), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2), @@ -473,6 +478,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_L3SQCREG1), REG32(GEN7_L3CNTLREG2), REG32(GEN7_L3CNTLREG3), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; static const struct drm_i915_reg_descriptor hsw_render_regs[] = { @@ -502,7 +508,10 @@ static const struct drm_i915_reg_descriptor hsw_render_regs[] = { }; static const struct drm_i915_reg_descriptor gen7_blt_regs[] = { + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), + REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE), REG32(BCS_SWCTRL), + REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), }; static const struct drm_i915_reg_descriptor ivb_master_regs[] = { @@ -603,7 +612,7 @@ static u32 
gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } -static bool validate_cmds_sorted(struct intel_engine_cs *engine, +static bool validate_cmds_sorted(const struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) { @@ -624,8 +633,10 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine, u32 curr = desc->cmd.value & desc->cmd.mask; if (curr < previous) { - DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n", - engine->id, i, j, curr, previous); + DRM_ERROR("CMD: %s [%d] command table not sorted: " + "table=%d entry=%d cmd=0x%08X prev=0x%08X\n", + engine->name, engine->id, + i, j, curr, previous); ret = false; } @@ -636,7 +647,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine, return ret; } -static bool check_sorted(int ring_id, +static bool check_sorted(const struct intel_engine_cs *engine, const struct drm_i915_reg_descriptor *reg_table, int reg_count) { @@ -648,8 +659,10 @@ static bool check_sorted(int ring_id, u32 curr = i915_mmio_reg_offset(reg_table[i].addr); if (curr < previous) { - DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", - ring_id, i, curr, previous); + DRM_ERROR("CMD: %s [%d] register table not sorted: " + "entry=%d reg=0x%08X prev=0x%08X\n", + engine->name, engine->id, + i, curr, previous); ret = false; } @@ -666,7 +679,7 @@ static bool validate_regs_sorted(struct intel_engine_cs *engine) for (i = 0; i < engine->reg_table_count; i++) { table = &engine->reg_tables[i]; - if (!check_sorted(engine->id, table->regs, table->num_regs)) + if (!check_sorted(engine, table->regs, table->num_regs)) return false; } @@ -687,12 +700,26 @@ struct cmd_node { * non-opcode bits being set. But if we don't include those bits, some 3D * commands may hash to the same bucket due to not including opcode bits that * make the command unique. For now, we will risk hashing to the same bucket. - * - * If we attempt to generate a perfect hash, we should be able to look at bits - * 31:29 of a command from a batch buffer and use the full mask for that - * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this. */ -#define CMD_HASH_MASK STD_MI_OPCODE_MASK +static inline u32 cmd_header_key(u32 x) +{ + u32 shift; + + switch (x >> INSTR_CLIENT_SHIFT) { + default: + case INSTR_MI_CLIENT: + shift = STD_MI_OPCODE_SHIFT; + break; + case INSTR_RC_CLIENT: + shift = STD_3D_OPCODE_SHIFT; + break; + case INSTR_BC_CLIENT: + shift = STD_2D_OPCODE_SHIFT; + break; + } + + return x >> shift; +} static int init_hash_table(struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, @@ -716,7 +743,7 @@ static int init_hash_table(struct intel_engine_cs *engine, desc_node->desc = desc; hash_add(engine->cmd_hash, &desc_node->node, - desc->cmd.value & CMD_HASH_MASK); + cmd_header_key(desc->cmd.value)); } } @@ -736,23 +763,21 @@ static void fini_hash_table(struct intel_engine_cs *engine) } /** - * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer + * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine * @engine: the engine to initialize * * Optionally initializes fields related to batch buffer command parsing in the * struct intel_engine_cs based on whether the platform requires software * command parsing. 
- * - * Return: non-zero if initialization fails */ -int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) +void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; int ret; if (!IS_GEN7(engine->i915)) - return 0; + return; switch (engine->id) { case RCS: @@ -806,36 +831,38 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; default: - DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n", - engine->id); - BUG(); + MISSING_CASE(engine->id); + return; } - BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)); - BUG_ON(!validate_regs_sorted(engine)); - - WARN_ON(!hash_empty(engine->cmd_hash)); + if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { + DRM_ERROR("%s: command descriptions are not sorted\n", + engine->name); + return; + } + if (!validate_regs_sorted(engine)) { + DRM_ERROR("%s: registers are not sorted\n", engine->name); + return; + } ret = init_hash_table(engine, cmd_tables, cmd_table_count); if (ret) { - DRM_ERROR("CMD: cmd_parser_init failed!\n"); + DRM_ERROR("%s: initialised failed!\n", engine->name); fini_hash_table(engine); - return ret; + return; } engine->needs_cmd_parser = true; - - return 0; } /** - * i915_cmd_parser_fini_ring() - clean up cmd parser related fields + * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields * @engine: the engine to clean up * * Releases any resources related to command parsing that may have been - * initialized for the specified ring. + * initialized for the specified engine. */ -void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine) +void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine) { if (!engine->needs_cmd_parser) return; @@ -850,12 +877,9 @@ find_cmd_in_table(struct intel_engine_cs *engine, struct cmd_node *desc_node; hash_for_each_possible(engine->cmd_hash, desc_node, node, - cmd_header & CMD_HASH_MASK) { + cmd_header_key(cmd_header)) { const struct drm_i915_cmd_descriptor *desc = desc_node->desc; - u32 masked_cmd = desc->cmd.mask & cmd_header; - u32 masked_value = desc->cmd.value & desc->cmd.mask; - - if (masked_cmd == masked_value) + if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0) return desc; } @@ -866,18 +890,21 @@ find_cmd_in_table(struct intel_engine_cs *engine, * Returns a pointer to a descriptor for the command specified by cmd_header. * * The caller must supply space for a default descriptor via the default_desc - * parameter. If no descriptor for the specified command exists in the ring's + * parameter. If no descriptor for the specified command exists in the engine's * command parser tables, this function fills in default_desc based on the - * ring's default length encoding and returns default_desc. + * engine's default length encoding and returns default_desc. 
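find_cmd_in_table() now matches with a single XOR-and-mask test. It accepts exactly the same headers as the old two-sided masking; a standalone assertion of that equivalence with arbitrary example values:

#include <assert.h>
#include <stdint.h>

/* New form: header and descriptor value agree on every masked bit. */
static int matches(uint32_t header, uint32_t value, uint32_t mask)
{
	return ((header ^ value) & mask) == 0;
}

/* Old form, kept for comparison. */
static int matches_old(uint32_t header, uint32_t value, uint32_t mask)
{
	return (mask & header) == (value & mask);
}

int main(void)
{
	uint32_t value = 0x18800000, mask = 0xFF800000;

	assert(matches(0x18800184, value, mask) == matches_old(0x18800184, value, mask)); /* both hit */
	assert(matches(0x19000184, value, mask) == matches_old(0x19000184, value, mask)); /* both miss */
	return 0;
}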
*/ static const struct drm_i915_cmd_descriptor* find_cmd(struct intel_engine_cs *engine, u32 cmd_header, + const struct drm_i915_cmd_descriptor *desc, struct drm_i915_cmd_descriptor *default_desc) { - const struct drm_i915_cmd_descriptor *desc; u32 mask; + if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0) + return desc; + desc = find_cmd_in_table(engine, cmd_header); if (desc) return desc; @@ -886,152 +913,140 @@ find_cmd(struct intel_engine_cs *engine, if (!mask) return NULL; - BUG_ON(!default_desc); - default_desc->flags = CMD_DESC_SKIP; + default_desc->cmd.value = cmd_header; + default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT; default_desc->length.mask = mask; - + default_desc->flags = CMD_DESC_SKIP; return default_desc; } static const struct drm_i915_reg_descriptor * -find_reg(const struct drm_i915_reg_descriptor *table, - int count, u32 addr) +__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) { - int i; - - for (i = 0; i < count; i++) { - if (i915_mmio_reg_offset(table[i].addr) == addr) - return &table[i]; + int start = 0, end = count; + while (start < end) { + int mid = start + (end - start) / 2; + int ret = addr - i915_mmio_reg_offset(table[mid].addr); + if (ret < 0) + end = mid; + else if (ret > 0) + start = mid + 1; + else + return &table[mid]; } - return NULL; } static const struct drm_i915_reg_descriptor * -find_reg_in_tables(const struct drm_i915_reg_table *tables, - int count, bool is_master, u32 addr) +find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr) { - int i; - const struct drm_i915_reg_table *table; - const struct drm_i915_reg_descriptor *reg; + const struct drm_i915_reg_table *table = engine->reg_tables; + int count = engine->reg_table_count; - for (i = 0; i < count; i++) { - table = &tables[i]; + do { if (!table->master || is_master) { - reg = find_reg(table->regs, table->num_regs, - addr); + const struct drm_i915_reg_descriptor *reg; + + reg = __find_reg(table->regs, table->num_regs, addr); if (reg != NULL) return reg; } - } + } while (table++, --count); return NULL; } -static u32 *vmap_batch(struct drm_i915_gem_object *obj, - unsigned start, unsigned len) -{ - int i; - void *addr = NULL; - struct sg_page_iter sg_iter; - int first_page = start >> PAGE_SHIFT; - int last_page = (len + start + 4095) >> PAGE_SHIFT; - int npages = last_page - first_page; - struct page **pages; - - pages = drm_malloc_ab(npages, sizeof(*pages)); - if (pages == NULL) { - DRM_DEBUG_DRIVER("Failed to get space for pages\n"); - goto finish; - } - - i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) { - pages[i++] = sg_page_iter_page(&sg_iter); - if (i == npages) - break; - } - - addr = vmap(pages, i, 0, PAGE_KERNEL); - if (addr == NULL) { - DRM_DEBUG_DRIVER("Failed to vmap pages\n"); - goto finish; - } - -finish: - if (pages) - drm_free_large(pages); - return (u32*)addr; -} - -/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */ -static u32 *copy_batch(struct drm_i915_gem_object *dest_obj, +/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ +static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, u32 batch_start_offset, - u32 batch_len) + u32 batch_len, + bool *needs_clflush_after) { - int needs_clflush = 0; - void *src_base, *src; - void *dst = NULL; + unsigned int src_needs_clflush; + unsigned int dst_needs_clflush; + void *dst, *src; int ret; - if (batch_len > dest_obj->base.size || - batch_len + batch_start_offset > 
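__find_reg() turns the register-whitelist lookup into a binary search, which is why the sorted-table validation above stays load-bearing, and presumably why the new RING_TIMESTAMP entries are slotted in by register offset. A minimal sketch over a plain array of offsets (the real table stores full descriptors):

#include <stddef.h>
#include <stdint.h>

const uint32_t *find_offset(const uint32_t *offsets, int count, uint32_t addr)
{
	int start = 0, end = count;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (addr < offsets[mid])
			end = mid;		/* keep the lower half */
		else if (addr > offsets[mid])
			start = mid + 1;	/* keep the upper half */
		else
			return &offsets[mid];	/* whitelisted */
	}

	return NULL;				/* not in the whitelist */
}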
src_obj->base.size) - return ERR_PTR(-E2BIG); - - if (WARN_ON(dest_obj->pages_pin_count == 0)) - return ERR_PTR(-ENODEV); - - ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush); - if (ret) { - DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n"); + ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush); + if (ret) return ERR_PTR(ret); - } - src_base = vmap_batch(src_obj, batch_start_offset, batch_len); - if (!src_base) { - DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n"); - ret = -ENOMEM; + ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush); + if (ret) { + dst = ERR_PTR(ret); goto unpin_src; } - ret = i915_gem_object_set_to_cpu_domain(dest_obj, true); - if (ret) { - DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n"); - goto unmap_src; + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); + if (IS_ERR(dst)) + goto unpin_dst; + + src = ERR_PTR(-ENODEV); + if (src_needs_clflush && + i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) { + src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); + if (!IS_ERR(src)) { + i915_memcpy_from_wc(dst, + src + batch_start_offset, + ALIGN(batch_len, 16)); + i915_gem_object_unpin_map(src_obj); + } } - - dst = vmap_batch(dest_obj, 0, batch_len); - if (!dst) { - DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n"); - ret = -ENOMEM; - goto unmap_src; + if (IS_ERR(src)) { + void *ptr; + int offset, n; + + offset = offset_in_page(batch_start_offset); + + /* We can avoid clflushing partial cachelines before the write + * if we only every write full cache-lines. Since we know that + * both the source and destination are in multiples of + * PAGE_SIZE, we can simply round up to the next cacheline. + * We don't care about copying too much here as we only + * validate up to the end of the batch. + */ + if (dst_needs_clflush & CLFLUSH_BEFORE) + batch_len = roundup(batch_len, + boot_cpu_data.x86_clflush_size); + + ptr = dst; + for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) { + int len = min_t(int, batch_len, PAGE_SIZE - offset); + + src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); + if (src_needs_clflush) + drm_clflush_virt_range(src + offset, len); + memcpy(ptr, src + offset, len); + kunmap_atomic(src); + + ptr += len; + batch_len -= len; + offset = 0; + } } - src = src_base + offset_in_page(batch_start_offset); - if (needs_clflush) - drm_clflush_virt_range(src, batch_len); - - memcpy(dst, src, batch_len); + /* dst_obj is returned with vmap pinned */ + *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; -unmap_src: - vunmap(src_base); +unpin_dst: + i915_gem_obj_finish_shmem_access(dst_obj); unpin_src: - i915_gem_object_unpin_pages(src_obj); - - return ret ? ERR_PTR(ret) : dst; + i915_gem_obj_finish_shmem_access(src_obj); + return dst; } /** - * i915_needs_cmd_parser() - should a given ring use software command parsing? + * intel_engine_needs_cmd_parser() - should a given engine use software + * command parsing? * @engine: the engine in question * * Only certain platforms require software batch buffer command parsing, and * only when enabled via module parameter. 
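The kmap fallback in copy_batch() walks the source object page by page and, when a flush before the write would otherwise be required, first rounds the copy up to whole cachelines. A standalone illustration of those two calculations; the cacheline size and batch placement are example assumptions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		4096u
#define CACHELINE	64u	/* typical x86 clflush size; example assumption */

static uint32_t round_up_u32(uint32_t len, uint32_t to)
{
	return (len + to - 1) / to * to;
}

int main(void)
{
	uint32_t batch_len = 1000, start = 300;	/* example batch placement */
	uint32_t offset = start % PAGE_SZ;	/* offset_in_page() */
	unsigned int page = start / PAGE_SZ;

	/* Copy whole cachelines so no flush-before-write of a partial line is
	 * needed; the extra bytes are harmless because parsing stops at the
	 * real end of the batch anyway. */
	batch_len = round_up_u32(batch_len, CACHELINE);	/* 1000 -> 1024 */

	while (batch_len) {
		uint32_t len = PAGE_SZ - offset;

		if (len > batch_len)
			len = batch_len;
		printf("copy %u bytes from source page %u (offset %u)\n",
		       len, page++, offset);
		batch_len -= len;
		offset = 0;
	}
	return 0;
}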
* - * Return: true if the ring requires software command parsing + * Return: true if the engine requires software command parsing */ -bool i915_needs_cmd_parser(struct intel_engine_cs *engine) +bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine) { if (!engine->needs_cmd_parser) return false; @@ -1048,6 +1063,9 @@ static bool check_cmd(const struct intel_engine_cs *engine, const bool is_master, bool *oacontrol_set) { + if (desc->flags & CMD_DESC_SKIP) + return true; + if (desc->flags & CMD_DESC_REJECT) { DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd); return false; @@ -1072,14 +1090,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg_in_tables(engine->reg_tables, - engine->reg_table_count, - is_master, - reg_addr); + find_reg(engine, is_master, reg_addr); if (!reg) { - DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n", - reg_addr, *cmd, engine->id); + DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (exec_id=%d)\n", + reg_addr, *cmd, engine->exec_id); return false; } @@ -1159,11 +1174,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, desc->bits[i].mask; if (dword != desc->bits[i].expected) { - DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n", + DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (exec_id=%d)\n", *cmd, desc->bits[i].mask, desc->bits[i].expected, - dword, engine->id); + dword, engine->exec_id); return false; } } @@ -1189,23 +1204,26 @@ static bool check_cmd(const struct intel_engine_cs *engine, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int i915_parse_cmds(struct intel_engine_cs *engine, - struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, - u32 batch_start_offset, - u32 batch_len, - bool is_master) +int intel_engine_cmd_parser(struct intel_engine_cs *engine, + struct drm_i915_gem_object *batch_obj, + struct drm_i915_gem_object *shadow_batch_obj, + u32 batch_start_offset, + u32 batch_len, + bool is_master) { - u32 *cmd, *batch_base, *batch_end; - struct drm_i915_cmd_descriptor default_desc = { 0 }; + u32 *cmd, *batch_end; + struct drm_i915_cmd_descriptor default_desc = noop_desc; + const struct drm_i915_cmd_descriptor *desc = &default_desc; bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ + bool needs_clflush_after = false; int ret = 0; - batch_base = copy_batch(shadow_batch_obj, batch_obj, - batch_start_offset, batch_len); - if (IS_ERR(batch_base)) { + cmd = copy_batch(shadow_batch_obj, batch_obj, + batch_start_offset, batch_len, + &needs_clflush_after); + if (IS_ERR(cmd)) { DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n"); - return PTR_ERR(batch_base); + return PTR_ERR(cmd); } /* @@ -1213,17 +1231,14 @@ int i915_parse_cmds(struct intel_engine_cs *engine, * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way. 
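check_cmd() gains a CMD_DESC_SKIP fast path, so the default length-only descriptors return true immediately, ahead of the reject and register-whitelist checks. A condensed sketch of that ordering; the flag values and the single-register simplification are mine:

#include <stdbool.h>
#include <stdint.h>

/* Flag values are arbitrary here; only the ordering of the checks matters. */
enum {
	DESC_SKIP	= 1 << 0,
	DESC_REJECT	= 1 << 1,
	DESC_REGISTER	= 1 << 2,
};

struct desc_sketch {
	unsigned int flags;
};

/* Condensed shape of check_cmd(): accept skips outright, reject banned
 * opcodes, and only then consult the register whitelist (simplified to a
 * single register address in dword 1). */
bool check_cmd_sketch(const struct desc_sketch *desc, const uint32_t *cmd,
		      bool (*reg_allowed)(uint32_t addr))
{
	if (desc->flags & DESC_SKIP)
		return true;

	if (desc->flags & DESC_REJECT)
		return false;

	if (desc->flags & DESC_REGISTER)
		return reg_allowed(cmd[1]);

	return true;
}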
*/ - batch_end = batch_base + (batch_len / sizeof(*batch_end)); - - cmd = batch_base; + batch_end = cmd + (batch_len / sizeof(*batch_end)); while (cmd < batch_end) { - const struct drm_i915_cmd_descriptor *desc; u32 length; if (*cmd == MI_BATCH_BUFFER_END) break; - desc = find_cmd(engine, *cmd, &default_desc); + desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); @@ -1274,7 +1289,9 @@ int i915_parse_cmds(struct intel_engine_cs *engine, ret = -EINVAL; } - vunmap(batch_base); + if (ret == 0 && needs_clflush_after) + drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len); + i915_gem_object_unpin_map(shadow_batch_obj); return ret; } @@ -1295,7 +1312,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) /* If the command parser is not enabled, report 0 - unsupported */ for_each_engine(engine, dev_priv) { - if (i915_needs_cmd_parser(engine)) { + if (intel_engine_needs_cmd_parser(engine)) { active = true; break; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 844fea795bae..a95d7bc81fb9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,12 +40,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -enum { - ACTIVE_LIST, - INACTIVE_LIST, - PINNED_LIST, -}; - /* As the drm_debugfs_init() routines are called before dev->dev_private is * allocated we need to hook into the minor for release. */ static int @@ -91,7 +85,7 @@ static int i915_capabilities(struct seq_file *m, void *data) static char get_active_flag(struct drm_i915_gem_object *obj) { - return obj->active ? '*' : ' '; + return i915_gem_object_is_active(obj) ? '*' : ' '; } static char get_pin_flag(struct drm_i915_gem_object *obj) @@ -101,7 +95,7 @@ static char get_pin_flag(struct drm_i915_gem_object *obj) static char get_tiling_flag(struct drm_i915_gem_object *obj) { - switch (obj->tiling_mode) { + switch (i915_gem_object_get_tiling(obj)) { default: case I915_TILING_NONE: return ' '; case I915_TILING_X: return 'X'; @@ -111,7 +105,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj) static char get_global_flag(struct drm_i915_gem_object *obj) { - return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; + return i915_gem_object_to_ggtt(obj, NULL) ? 
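The scanning loop keeps its familiar shape: walk the shadow copy one command at a time, stop at MI_BATCH_BUFFER_END, and fail on unknown, oversized or rejected commands. A stripped-down skeleton; the two helpers are placeholders for find_cmd()/check_cmd(), and the MI_BATCH_BUFFER_END encoding is my assumption (MI opcode 0x0A shifted into bits 28:23):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define MI_BB_END	(0x0Au << 23)	/* assumed MI_BATCH_BUFFER_END encoding */

/* Placeholders standing in for the per-engine descriptor lookup and checks. */
static uint32_t decode_length(uint32_t header)	{ return (header & 0x3F) + 2; }
static bool validate_cmd(const uint32_t *cmd, uint32_t len) { (void)cmd; (void)len; return true; }

int parse_batch_sketch(const uint32_t *cmd, uint32_t batch_len)
{
	const uint32_t *batch_end = cmd + batch_len / sizeof(*cmd);

	while (cmd < batch_end) {
		uint32_t length;

		if (*cmd == MI_BB_END)
			return 0;

		length = decode_length(*cmd);
		if (length == 0 || cmd + length > batch_end)
			return -EINVAL;		/* runs past the batch */

		if (!validate_cmd(cmd, length))
			return -EINVAL;		/* rejected by the checks */

		cmd += length;
	}

	/* Falling off the end without MI_BATCH_BUFFER_END is also an error. */
	return -EINVAL;
}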
'g' : ' '; } static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) @@ -125,7 +119,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) struct i915_vma *vma; list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (vma->is_ggtt && drm_mm_node_allocated(&vma->node)) + if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node)) size += vma->node.size; } @@ -138,6 +132,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct intel_engine_cs *engine; struct i915_vma *vma; + unsigned int frontbuffer_bits; int pin_count = 0; enum intel_engine_id id; @@ -155,30 +150,36 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->base.write_domain); for_each_engine_id(engine, dev_priv, id) seq_printf(m, "%x ", - i915_gem_request_get_seqno(obj->last_read_req[id])); - seq_printf(m, "] %x %x%s%s%s", - i915_gem_request_get_seqno(obj->last_write_req), - i915_gem_request_get_seqno(obj->last_fenced_req), + i915_gem_active_get_seqno(&obj->last_read[id], + &obj->base.dev->struct_mutex)); + seq_printf(m, "] %x %s%s%s", + i915_gem_active_get_seqno(&obj->last_write, + &obj->base.dev->struct_mutex), i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (vma->pin_count > 0) + if (i915_vma_is_pinned(vma)) pin_count++; } seq_printf(m, " (pinned x %d)", pin_count); if (obj->pin_display) seq_printf(m, " (display)"); - if (obj->fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", obj->fence_reg); list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!drm_mm_node_allocated(&vma->node)) + continue; + seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", - vma->is_ggtt ? "g" : "pp", + i915_vma_is_ggtt(vma) ? "g" : "pp", vma->node.start, vma->node.size); - if (vma->is_ggtt) + if (i915_vma_is_ggtt(vma)) seq_printf(m, ", type: %u", vma->ggtt_view.type); + if (vma->fence) + seq_printf(m, " , fence: %d%s", + vma->fence->id, + i915_gem_active_isset(&vma->last_fence) ? 
"*" : ""); seq_puts(m, ")"); } if (obj->stolen) @@ -192,58 +193,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) *t = '\0'; seq_printf(m, " (%s mappable)", s); } - if (obj->last_write_req != NULL) - seq_printf(m, " (%s)", - i915_gem_request_get_engine(obj->last_write_req)->name); - if (obj->frontbuffer_bits) - seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); -} - -static int i915_gem_object_list_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = m->private; - uintptr_t list = (uintptr_t) node->info_ent->data; - struct list_head *head; - struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_vma *vma; - u64 total_obj_size, total_gtt_size; - int count, ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - /* FIXME: the user of this interface might want more than just GGTT */ - switch (list) { - case ACTIVE_LIST: - seq_puts(m, "Active:\n"); - head = &ggtt->base.active_list; - break; - case INACTIVE_LIST: - seq_puts(m, "Inactive:\n"); - head = &ggtt->base.inactive_list; - break; - default: - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(vma, head, vm_link) { - seq_printf(m, " "); - describe_obj(m, vma->obj); - seq_printf(m, "\n"); - total_obj_size += vma->obj->base.size; - total_gtt_size += vma->node.size; - count++; - } - mutex_unlock(&dev->struct_mutex); + engine = i915_gem_active_get_engine(&obj->last_write, + &obj->base.dev->struct_mutex); + if (engine) + seq_printf(m, " (%s)", engine->name); - seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", - count, total_obj_size, total_gtt_size); - return 0; + frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); + if (frontbuffer_bits) + seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); } static int obj_rank_by_stolen(void *priv, @@ -311,17 +269,6 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) return 0; } -#define count_objects(list, member) do { \ - list_for_each_entry(obj, list, member) { \ - size += i915_gem_obj_total_ggtt_size(obj); \ - ++count; \ - if (obj->map_and_fenceable) { \ - mappable_size += i915_gem_obj_ggtt_size(obj); \ - ++mappable_count; \ - } \ - } \ -} while (0) - struct file_stats { struct drm_i915_file_private *file_priv; unsigned long count; @@ -338,46 +285,29 @@ static int per_file_stats(int id, void *ptr, void *data) stats->count++; stats->total += obj->base.size; - + if (!obj->bind_count) + stats->unbound += obj->base.size; if (obj->base.name || obj->base.dma_buf) stats->shared += obj->base.size; - if (USES_FULL_PPGTT(obj->base.dev)) { - list_for_each_entry(vma, &obj->vma_list, obj_link) { - struct i915_hw_ppgtt *ppgtt; - - if (!drm_mm_node_allocated(&vma->node)) - continue; + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!drm_mm_node_allocated(&vma->node)) + continue; - if (vma->is_ggtt) { - stats->global += obj->base.size; - continue; - } + if (i915_vma_is_ggtt(vma)) { + stats->global += vma->node.size; + } else { + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); - ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); - if (ppgtt->file_priv != stats->file_priv) + if (ppgtt->base.file != stats->file_priv) continue; - - if (obj->active) /* XXX per-vma statistic */ - stats->active += obj->base.size; - else - stats->inactive += obj->base.size; - - return 0; } - } else { - if 
(i915_gem_obj_ggtt_bound(obj)) { - stats->global += obj->base.size; - if (obj->active) - stats->active += obj->base.size; - else - stats->inactive += obj->base.size; - return 0; - } - } - if (!list_empty(&obj->global_list)) - stats->unbound += obj->base.size; + if (i915_vma_is_active(vma)) + stats->active += vma->node.size; + else + stats->inactive += vma->node.size; + } return 0; } @@ -424,9 +354,9 @@ static int per_file_ctx_stats(int id, void *ptr, void *data) for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { if (ctx->engine[n].state) - per_file_stats(0, ctx->engine[n].state, data); - if (ctx->engine[n].ringbuf) - per_file_stats(0, ctx->engine[n].ringbuf->obj, data); + per_file_stats(0, ctx->engine[n].state->obj, data); + if (ctx->engine[n].ring) + per_file_stats(0, ctx->engine[n].ring->vma->obj, data); } return 0; @@ -453,30 +383,16 @@ static void print_context_stats(struct seq_file *m, print_file_stats(m, "[k]contexts", stats); } -#define count_vmas(list, member) do { \ - list_for_each_entry(vma, list, member) { \ - size += i915_gem_obj_total_ggtt_size(vma->obj); \ - ++count; \ - if (vma->obj->map_and_fenceable) { \ - mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ - ++mappable_count; \ - } \ - } \ -} while (0) - static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - u32 count, mappable_count, purgeable_count; - u64 size, mappable_size, purgeable_size; - unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0; - u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0; + u32 count, mapped_count, purgeable_count, dpy_count; + u64 size, mapped_size, purgeable_size, dpy_size; struct drm_i915_gem_object *obj; struct drm_file *file; - struct i915_vma *vma; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -487,70 +403,53 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->mm.object_count, dev_priv->mm.object_memory); - size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.bound_list, global_list); - seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n", - count, mappable_count, size, mappable_size); - - size = count = mappable_size = mappable_count = 0; - count_vmas(&ggtt->base.active_list, vm_link); - seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", - count, mappable_count, size, mappable_size); + size = count = 0; + mapped_size = mapped_count = 0; + purgeable_size = purgeable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + size += obj->base.size; + ++count; - size = count = mappable_size = mappable_count = 0; - count_vmas(&ggtt->base.inactive_list, vm_link); - seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", - count, mappable_count, size, mappable_size); + if (obj->madv == I915_MADV_DONTNEED) { + purgeable_size += obj->base.size; + ++purgeable_count; + } - size = count = purgeable_size = purgeable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { - size += obj->base.size, ++count; - if (obj->madv == I915_MADV_DONTNEED) - purgeable_size += obj->base.size, ++purgeable_count; if (obj->mapping) { - pin_mapped_count++; - pin_mapped_size += obj->base.size; - if (obj->pages_pin_count == 0) { - pin_mapped_purgeable_count++; - pin_mapped_purgeable_size += obj->base.size; - } + mapped_count++; + mapped_size += obj->base.size; } } 
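per_file_stats() now charges memory per VMA instead of per object: a GGTT binding counts as global, a PPGTT binding counts only when the address space belongs to the file being inspected, and active/inactive is decided for each VMA as well. The same decision tree restated outside the driver structures:

#include <stdbool.h>
#include <stdint.h>

struct stats_sketch {
	uint64_t global, active, inactive;
};

struct vma_sketch {
	bool allocated;		/* drm_mm_node_allocated() */
	bool is_ggtt;		/* i915_vma_is_ggtt() */
	bool is_active;		/* i915_vma_is_active() */
	bool owned_by_file;	/* ppgtt->base.file == stats->file_priv */
	uint64_t node_size;
};

void account_vma(struct stats_sketch *s, const struct vma_sketch *v)
{
	if (!v->allocated)
		return;

	if (v->is_ggtt)
		s->global += v->node_size;
	else if (!v->owned_by_file)
		return;			/* someone else's PPGTT, skip */

	if (v->is_active)
		s->active += v->node_size;
	else
		s->inactive += v->node_size;
}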
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); - size = count = mappable_size = mappable_count = 0; + size = count = dpy_size = dpy_count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (obj->fault_mappable) { - size += i915_gem_obj_ggtt_size(obj); - ++count; - } + size += obj->base.size; + ++count; + if (obj->pin_display) { - mappable_size += i915_gem_obj_ggtt_size(obj); - ++mappable_count; + dpy_size += obj->base.size; + ++dpy_count; } + if (obj->madv == I915_MADV_DONTNEED) { purgeable_size += obj->base.size; ++purgeable_count; } + if (obj->mapping) { - pin_mapped_count++; - pin_mapped_size += obj->base.size; - if (obj->pages_pin_count == 0) { - pin_mapped_purgeable_count++; - pin_mapped_purgeable_size += obj->base.size; - } + mapped_count++; + mapped_size += obj->base.size; } } + seq_printf(m, "%u bound objects, %llu bytes\n", + count, size); seq_printf(m, "%u purgeable objects, %llu bytes\n", purgeable_count, purgeable_size); - seq_printf(m, "%u pinned mappable objects, %llu bytes\n", - mappable_count, mappable_size); - seq_printf(m, "%u fault mappable objects, %llu bytes\n", - count, size); - seq_printf(m, - "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n", - pin_mapped_count, pin_mapped_purgeable_count, - pin_mapped_size, pin_mapped_purgeable_size); + seq_printf(m, "%u mapped objects, %llu bytes\n", + mapped_count, mapped_size); + seq_printf(m, "%u display objects (pinned), %llu bytes\n", + dpy_count, dpy_size); seq_printf(m, "%llu [%llu] gtt total\n", ggtt->base.total, ggtt->mappable_end - ggtt->base.start); @@ -563,6 +462,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data) print_context_stats(m, dev_priv); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_request *request; struct task_struct *task; memset(&stats, 0, sizeof(stats)); @@ -576,10 +477,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data) * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU. */ + mutex_lock(&dev->struct_mutex); + request = list_first_entry_or_null(&file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); + task = pid_task(request && request->ctx->pid ? + request->ctx->pid : file->pid, + PIDTYPE_PID); print_file_stats(m, task ? 
task->comm : "<unknown>", stats); rcu_read_unlock(); + mutex_unlock(&dev->struct_mutex); } mutex_unlock(&dev->filelist_mutex); @@ -590,8 +498,8 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - uintptr_t list = (uintptr_t) node->info_ent->data; struct drm_i915_private *dev_priv = to_i915(dev); + bool show_pin_display_only = !!data; struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; int count, ret; @@ -602,7 +510,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) total_obj_size = total_gtt_size = count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj)) + if (show_pin_display_only && !obj->pin_display) continue; seq_puts(m, " "); @@ -754,21 +662,20 @@ static int i915_gem_request_info(struct seq_file *m, void *data) int count; count = 0; - list_for_each_entry(req, &engine->request_list, list) + list_for_each_entry(req, &engine->request_list, link) count++; if (count == 0) continue; seq_printf(m, "%s requests: %d\n", engine->name, count); - list_for_each_entry(req, &engine->request_list, list) { + list_for_each_entry(req, &engine->request_list, link) { + struct pid *pid = req->ctx->pid; struct task_struct *task; rcu_read_lock(); - task = NULL; - if (req->pid) - task = pid_task(req->pid, PIDTYPE_PID); + task = pid ? pid_task(pid, PIDTYPE_PID) : NULL; seq_printf(m, " %x @ %d: %s [%d]\n", - req->seqno, + req->fence.seqno, (int) (jiffies - req->emitted_jiffies), task ? task->comm : "<unknown>", task ? task->pid : -1); @@ -793,8 +700,6 @@ static void i915_ring_seqno_info(struct seq_file *m, seq_printf(m, "Current sequence (%s): %x\n", engine->name, intel_engine_get_seqno(engine)); - seq_printf(m, "Current user interrupts (%s): %lx\n", - engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups)); spin_lock(&b->lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { @@ -1033,14 +938,14 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; + struct i915_vma *vma = dev_priv->fence_regs[i].vma; seq_printf(m, "Fence %d, pin count = %d, object = ", i, dev_priv->fence_regs[i].pin_count); - if (obj == NULL) + if (!vma) seq_puts(m, "unused"); else - describe_obj(m, obj); + describe_obj(m, vma->obj); seq_putc(m, '\n'); } @@ -1205,8 +1110,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - if (IS_GEN5(dev)) { u16 rgvswctl = I915_READ16(MEMSWCTL); u16 rgvstat = I915_READ16(MEMSTAT_ILK); @@ -1381,6 +1284,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); seq_printf(m, "Min freq: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); + seq_printf(m, "Boost freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); seq_printf(m, "Max freq: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); seq_printf(m, @@ -1419,7 +1324,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); for_each_engine_id(engine, dev_priv, id) { - acthd[id] = intel_ring_get_active_head(engine); + acthd[id] = intel_engine_get_active_head(engine); seqno[id] = 
intel_engine_get_seqno(engine); } @@ -1440,11 +1345,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) engine->hangcheck.seqno, seqno[id], engine->last_submitted_seqno); - seq_printf(m, "\twaiters? %d\n", - intel_engine_has_waiter(engine)); - seq_printf(m, "\tuser interrupts = %lx [current %lx]\n", - engine->hangcheck.user_interrupts, - READ_ONCE(engine->breadcrumbs.irq_wakeups)); + seq_printf(m, "\twaiters? %s, fake irq active? %s\n", + yesno(intel_engine_has_waiter(engine)), + yesno(test_bit(engine->id, + &dev_priv->gpu_error.missed_irq_rings))); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", (long long)engine->hangcheck.acthd, (long long)acthd[id]); @@ -1602,6 +1506,7 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; + u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; unsigned forcewake_count; int count = 0, ret; @@ -1629,6 +1534,10 @@ static int gen6_drpc_info(struct seq_file *m) rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); + if (INTEL_INFO(dev)->gen >= 9) { + gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); + gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); + } mutex_unlock(&dev->struct_mutex); mutex_lock(&dev_priv->rps.hw_lock); sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); @@ -1647,6 +1556,12 @@ static int gen6_drpc_info(struct seq_file *m) yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); seq_printf(m, "RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); + if (INTEL_INFO(dev)->gen >= 9) { + seq_printf(m, "Render Well Gating Enabled: %s\n", + yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); + seq_printf(m, "Media Well Gating Enabled: %s\n", + yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); + } seq_printf(m, "Deep RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); seq_printf(m, "Deepest RC6 Enabled: %s\n", @@ -1675,6 +1590,14 @@ static int gen6_drpc_info(struct seq_file *m) seq_printf(m, "Core Power Down: %s\n", yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); + if (INTEL_INFO(dev)->gen >= 9) { + seq_printf(m, "Render Power Well: %s\n", + (gen9_powergate_status & + GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); + seq_printf(m, "Media Power Well: %s\n", + (gen9_powergate_status & + GEN9_PWRGT_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); + } /* Not exactly sure what this is */ seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n", @@ -1692,7 +1615,7 @@ static int gen6_drpc_info(struct seq_file *m) GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); seq_printf(m, "RC6++ voltage: %dmV\n", GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); - return 0; + return i915_forcewake_domains(m, NULL); } static int i915_drpc_info(struct seq_file *m, void *unused) @@ -1896,8 +1819,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); if (ret) goto out; @@ -2019,12 +1940,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) return 0; } -static void describe_ctx_ringbuf(struct seq_file *m, - struct intel_ringbuffer *ringbuf) +static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) { seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", - ringbuf->space, ringbuf->head, ringbuf->tail, - ringbuf->last_retired_head); + ring->space, ring->head, ring->tail, + ring->last_retired_head); } static int i915_context_status(struct seq_file *m, void *unused) @@ -2042,18 +1962,17 @@ static int i915_context_status(struct seq_file *m, void *unused) list_for_each_entry(ctx, &dev_priv->context_list, link) { seq_printf(m, "HW context %u ", ctx->hw_id); - if (IS_ERR(ctx->file_priv)) { - seq_puts(m, "(deleted) "); - } else if (ctx->file_priv) { - struct pid *pid = ctx->file_priv->file->pid; + if (ctx->pid) { struct task_struct *task; - task = get_pid_task(pid, PIDTYPE_PID); + task = get_pid_task(ctx->pid, PIDTYPE_PID); if (task) { seq_printf(m, "(%s [%d]) ", task->comm, task->pid); put_task_struct(task); } + } else if (IS_ERR(ctx->file_priv)) { + seq_puts(m, "(deleted) "); } else { seq_puts(m, "(kernel) "); } @@ -2067,9 +1986,9 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_printf(m, "%s: ", engine->name); seq_putc(m, ce->initialised ? 
'I' : 'i'); if (ce->state) - describe_obj(m, ce->state); - if (ce->ringbuf) - describe_ctx_ringbuf(m, ce->ringbuf); + describe_obj(m, ce->state->obj); + if (ce->ring) + describe_ctx_ring(m, ce->ring); seq_putc(m, '\n'); } @@ -2085,36 +2004,34 @@ static void i915_dump_lrc_obj(struct seq_file *m, struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + struct i915_vma *vma = ctx->engine[engine->id].state; struct page *page; - uint32_t *reg_state; int j; - unsigned long ggtt_offset = 0; seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); - if (ctx_obj == NULL) { - seq_puts(m, "\tNot allocated\n"); + if (!vma) { + seq_puts(m, "\tFake context\n"); return; } - if (!i915_gem_obj_ggtt_bound(ctx_obj)) - seq_puts(m, "\tNot bound in GGTT\n"); - else - ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj); + if (vma->flags & I915_VMA_GLOBAL_BIND) + seq_printf(m, "\tBound in GGTT at 0x%08x\n", + i915_ggtt_offset(vma)); - if (i915_gem_object_get_pages(ctx_obj)) { - seq_puts(m, "\tFailed to get pages for context object\n"); + if (i915_gem_object_get_pages(vma->obj)) { + seq_puts(m, "\tFailed to get pages for context object\n\n"); return; } - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - if (!WARN_ON(page == NULL)) { - reg_state = kmap_atomic(page); + page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); + if (page) { + u32 *reg_state = kmap_atomic(page); for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { - seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", - ggtt_offset + 4096 + (j * 4), + seq_printf(m, + "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", + j * 4, reg_state[j], reg_state[j + 1], reg_state[j + 2], reg_state[j + 3]); } @@ -2434,6 +2351,20 @@ static int count_irq_waiters(struct drm_i915_private *i915) return count; } +static const char *rps_power_to_str(unsigned int power) +{ + static const char * const strings[] = { + [LOW_POWER] = "low power", + [BETWEEN] = "mixed", + [HIGH_POWER] = "high power", + }; + + if (power >= ARRAY_SIZE(strings) || !strings[power]) + return "unknown"; + + return strings[power]; +} + static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -2445,12 +2376,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "GPU busy? %s [%x]\n", yesno(dev_priv->gt.awake), dev_priv->gt.active_engines); seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); - seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", - intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), + seq_printf(m, "Frequency requested %d\n", + intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); + seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); + seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), + intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), + intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); mutex_lock(&dev->filelist_mutex); spin_lock(&dev_priv->rps.client_lock); @@ -2467,16 +2403,35 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) list_empty(&file_priv->rps.link) ? 
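The context-image dump now prints offsets relative to the start of the register state (j * 4), four dwords per line, instead of an absolute GGTT address. The same formatting in a standalone form with example data:

#include <stdint.h>
#include <stdio.h>

static void dump_regs(const uint32_t *reg_state, unsigned int count)
{
	unsigned int j;

	for (j = 0; j + 3 < count; j += 4)
		printf("\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
		       j * 4, reg_state[j], reg_state[j + 1],
		       reg_state[j + 2], reg_state[j + 3]);
}

int main(void)
{
	const uint32_t example[8] = { 0x11000001, 0x2244, 0, 0, 0xdeadbeef, 1, 2, 3 };

	dump_regs(example, 8);
	return 0;
}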
"" : ", active"); rcu_read_unlock(); } - seq_printf(m, "Semaphore boosts: %d%s\n", - dev_priv->rps.semaphores.boosts, - list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active"); - seq_printf(m, "MMIO flip boosts: %d%s\n", - dev_priv->rps.mmioflips.boosts, - list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active"); - seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts); + seq_printf(m, "Kernel (anonymous) boosts: %d\n", dev_priv->rps.boosts); spin_unlock(&dev_priv->rps.client_lock); mutex_unlock(&dev->filelist_mutex); + if (INTEL_GEN(dev_priv) >= 6 && + dev_priv->rps.enabled && + dev_priv->gt.active_engines) { + u32 rpup, rpupei; + u32 rpdown, rpdownei; + + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; + rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; + rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; + rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + + seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", + rps_power_to_str(dev_priv->rps.power)); + seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", + 100 * rpup / rpupei, + dev_priv->rps.up_threshold); + seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", + 100 * rpdown / rpdownei, + dev_priv->rps.down_threshold); + } else { + seq_puts(m, "\nRPS Autotuning inactive\n"); + } + return 0; } @@ -2543,6 +2498,7 @@ static void i915_guc_client_info(struct seq_file *m, struct i915_guc_client *client) { struct intel_engine_cs *engine; + enum intel_engine_id id; uint64_t tot = 0; seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", @@ -2553,15 +2509,14 @@ static void i915_guc_client_info(struct seq_file *m, client->wq_size, client->wq_offset, client->wq_tail); seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); - seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); - for_each_engine(engine, dev_priv) { + for_each_engine_id(engine, dev_priv, id) { + u64 submissions = client->submissions[id]; + tot += submissions; seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[engine->id], - engine->name); - tot += client->submissions[engine->id]; + submissions, engine->name); } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2574,6 +2529,7 @@ static int i915_guc_info(struct seq_file *m, void *data) struct intel_guc guc; struct i915_guc_client client = {}; struct intel_engine_cs *engine; + enum intel_engine_id id; u64 total = 0; if (!HAS_GUC_SCHED(dev_priv)) @@ -2600,11 +2556,11 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "GuC last action error code: %d\n", guc.action_err); seq_printf(m, "\nGuC submissions:\n"); - for_each_engine(engine, dev_priv) { + for_each_engine_id(engine, dev_priv, id) { + u64 submissions = guc.submissions[id]; + total += submissions; seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - engine->name, guc.submissions[engine->id], - guc.last_seqno[engine->id]); - total += guc.submissions[engine->id]; + engine->name, submissions, guc.last_seqno[id]); } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2621,15 +2577,15 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *log_obj = 
dev_priv->guc.log_obj; - u32 *log; + struct drm_i915_gem_object *obj; int i = 0, pg; - if (!log_obj) + if (!dev_priv->guc.log_vma) return 0; - for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) { - log = kmap_atomic(i915_gem_object_get_page(log_obj, pg)); + obj = dev_priv->guc.log_vma->obj; + for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) { + u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg)); for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4) seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", @@ -3089,12 +3045,12 @@ static const char *plane_rotation(unsigned int rotation) */ snprintf(buf, sizeof(buf), "%s%s%s%s%s%s(0x%08x)", - (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "", - (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "", - (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "", - (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "", - (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "", - (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "", + (rotation & DRM_ROTATE_0) ? "0 " : "", + (rotation & DRM_ROTATE_90) ? "90 " : "", + (rotation & DRM_ROTATE_180) ? "180 " : "", + (rotation & DRM_ROTATE_270) ? "270 " : "", + (rotation & DRM_REFLECT_X) ? "FLIPX " : "", + (rotation & DRM_REFLECT_Y) ? "FLIPY " : "", rotation); return buf; @@ -3109,6 +3065,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { struct drm_plane_state *state; struct drm_plane *plane = &intel_plane->base; + char *format_name; if (!plane->state) { seq_puts(m, "plane->state is NULL!\n"); @@ -3117,6 +3074,12 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) state = plane->state; + if (state->fb) { + format_name = drm_get_format_name(state->fb->pixel_format); + } else { + format_name = kstrdup("N/A", GFP_KERNEL); + } + seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", plane->base.id, plane_type(intel_plane->base.type), @@ -3130,8 +3093,10 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) ((state->src_w & 0xffff) * 15625) >> 10, (state->src_h >> 16), ((state->src_h & 0xffff) * 15625) >> 10, - state->fb ? 
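plane_rotation() now treats the rotation field as a plain bitmask of DRM_ROTATE_*/DRM_REFLECT_* flags, without the extra BIT() wrapping. A standalone decode in the same shape; the bit assignments here are placeholders for the sketch:

#include <stdio.h>

/* Placeholder bit assignments for this sketch only. */
#define ROT_0		(1u << 0)
#define ROT_90		(1u << 1)
#define ROT_180		(1u << 2)
#define ROT_270		(1u << 3)
#define REFL_X		(1u << 4)
#define REFL_Y		(1u << 5)

static const char *rotation_str(unsigned int rotation, char *buf, size_t len)
{
	snprintf(buf, len, "%s%s%s%s%s%s(0x%08x)",
		 (rotation & ROT_0)   ? "0 "     : "",
		 (rotation & ROT_90)  ? "90 "    : "",
		 (rotation & ROT_180) ? "180 "   : "",
		 (rotation & ROT_270) ? "270 "   : "",
		 (rotation & REFL_X)  ? "FLIPX " : "",
		 (rotation & REFL_Y)  ? "FLIPY " : "",
		 rotation);
	return buf;
}

int main(void)
{
	char buf[64];

	puts(rotation_str(ROT_90 | REFL_X, buf, sizeof(buf)));	/* "90 FLIPX (0x00000012)" */
	return 0;
}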
drm_get_format_name(state->fb->pixel_format) : "N/A", + format_name, plane_rotation(state->rotation)); + + kfree(format_name); } } @@ -3224,11 +3189,11 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_engine_cs *engine; - int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + int num_rings = INTEL_INFO(dev)->num_rings; enum intel_engine_id id; int j, ret; - if (!i915_semaphore_is_enabled(dev_priv)) { + if (!i915.semaphores) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -3242,7 +3207,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) struct page *page; uint64_t *seqno; - page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); + page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); seqno = (uint64_t *)kmap_atomic(page); for_each_engine_id(engine, dev_priv, id) { @@ -3621,7 +3586,6 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, while (n_entries > 0) { struct intel_pipe_crc_entry *entry = &pipe_crc->entries[pipe_crc->tail]; - int ret; if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) @@ -3638,8 +3602,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, spin_unlock_irq(&pipe_crc->lock); - ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN); - if (ret == PIPE_CRC_LINE_LEN) + if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) return -EFAULT; user_buf += PIPE_CRC_LINE_LEN; @@ -4921,7 +4884,7 @@ i915_drop_caches_set(void *data, u64 val) return ret; if (val & DROP_ACTIVE) { - ret = i915_gem_wait_for_idle(dev_priv); + ret = i915_gem_wait_for_idle(dev_priv, true); if (ret) goto unlock; } @@ -4950,20 +4913,11 @@ i915_max_freq_get(void *data, u64 *val) { struct drm_device *dev = data; struct drm_i915_private *dev_priv = to_i915(dev); - int ret; if (INTEL_INFO(dev)->gen < 6) return -ENODEV; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); - if (ret) - return ret; - *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); - mutex_unlock(&dev_priv->rps.hw_lock); - return 0; } @@ -4978,8 +4932,6 @@ i915_max_freq_set(void *data, u64 val) if (INTEL_INFO(dev)->gen < 6) return -ENODEV; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); @@ -5017,20 +4969,11 @@ i915_min_freq_get(void *data, u64 *val) { struct drm_device *dev = data; struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - - ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); - if (ret) - return ret; - *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); - mutex_unlock(&dev_priv->rps.hw_lock); - return 0; } @@ -5042,11 +4985,9 @@ i915_min_freq_set(void *data, u64 val) u32 hw_max, hw_min; int ret; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return -ENODEV; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); @@ -5268,7 +5209,8 @@ static void broadwell_sseu_device_status(struct drm_device *dev, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_info_node *node = 
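The pipe CRC read fix leans on copy_to_user()'s contract: it returns the number of bytes it could not copy, zero on full success, so any non-zero return means the user buffer faulted somewhere and -EFAULT is the right result (the old test only caught the case where nothing at all was copied). A userspace mock of that contract:

#include <stdio.h>
#include <string.h>

/* Mock with the same contract as copy_to_user(): returns the number of bytes
 * NOT copied; here we pretend the last 'faulted' bytes hit an unmapped page. */
static unsigned long mock_copy_to_user(void *to, const void *from,
				       unsigned long n, unsigned long faulted)
{
	memcpy(to, from, n - faulted);
	return faulted;
}

int main(void)
{
	char kbuf[64] = "one line of CRC data", ubuf[64];

	if (mock_copy_to_user(ubuf, kbuf, sizeof(kbuf), 8))
		puts("partial copy -> return -EFAULT");
	return 0;
}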
(struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = to_i915(node->minor->dev); + struct drm_device *dev = &dev_priv->drm; struct sseu_dev_status stat; if (INTEL_INFO(dev)->gen < 8) @@ -5298,6 +5240,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused) seq_puts(m, "SSEU Device Status\n"); memset(&stat, 0, sizeof(stat)); + + intel_runtime_pm_get(dev_priv); + if (IS_CHERRYVIEW(dev)) { cherryview_sseu_device_status(dev, &stat); } else if (IS_BROADWELL(dev)) { @@ -5305,6 +5250,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused) } else if (INTEL_INFO(dev)->gen >= 9) { gen9_sseu_device_status(dev, &stat); } + + intel_runtime_pm_put(dev_priv); + seq_printf(m, " Enabled Slice Total: %u\n", stat.slice_total); seq_printf(m, " Enabled Subslice Total: %u\n", @@ -5390,9 +5338,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_gtt", i915_gem_gtt_info, 0}, - {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, - {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, - {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1}, {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 95ddd56b89f0..13ae340ef1f3 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -228,27 +228,6 @@ static void intel_detect_pch(struct drm_device *dev) pci_dev_put(pch); } -bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 6) - return false; - - if (i915.semaphores >= 0) - return i915.semaphores; - - /* TODO: make semaphores and Execlists play nicely together */ - if (i915.enable_execlists) - return false; - -#ifdef CONFIG_INTEL_IOMMU - /* Enable semaphores on SNB when IO remapping is off */ - if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) - return false; -#endif - - return true; -} - static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -324,7 +303,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev_priv); + value = i915.semaphores; break; case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; @@ -706,7 +685,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); kfree(ap); @@ -848,6 +827,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); + i915_memcpy_init_early(dev_priv); + ret = i915_workqueues_init(dev_priv); if (ret < 0) return ret; @@ -999,6 +980,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) i915.enable_ppgtt = intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); + + i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); + DRM_DEBUG_DRIVER("use GPU sempahores? 
%s\n", yesno(i915.semaphores)); } /** @@ -1011,8 +995,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) static int i915_driver_init_hw(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - uint32_t aperture_size; int ret; if (i915_inject_load_failure()) @@ -1022,16 +1004,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_sanitize_options(dev_priv); - ret = i915_ggtt_init_hw(dev); + ret = i915_ggtt_probe_hw(dev_priv); if (ret) return ret; - ret = i915_ggtt_enable_hw(dev); - if (ret) { - DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; - } - /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. */ ret = i915_kick_out_firmware_fb(dev_priv); @@ -1046,6 +1022,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) goto out_ggtt; } + ret = i915_ggtt_init_hw(dev_priv); + if (ret) + return ret; + + ret = i915_ggtt_enable_hw(dev_priv); + if (ret) { + DRM_ERROR("failed to enable GGTT\n"); + goto out_ggtt; + } + pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ @@ -1058,7 +1044,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) } } - /* 965GM sometimes incorrectly writes to hardware status page (HWS) * using 32bit addressing, overwriting memory if HWS is located * above 4GB. @@ -1077,19 +1062,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) } } - aperture_size = ggtt->mappable_end; - - ggtt->mappable = - io_mapping_create_wc(ggtt->mappable_base, - aperture_size); - if (!ggtt->mappable) { - ret = -EIO; - goto out_ggtt; - } - - ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, - aperture_size); - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); @@ -1118,7 +1090,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) return 0; out_ggtt: - i915_ggtt_cleanup_hw(dev); + i915_ggtt_cleanup_hw(dev_priv); return ret; } @@ -1130,15 +1102,12 @@ out_ggtt: static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - struct i915_ggtt *ggtt = &dev_priv->ggtt; if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); pm_qos_remove_request(&dev_priv->pm_qos); - arch_phys_wc_del(ggtt->mtrr); - io_mapping_free(ggtt->mappable); - i915_ggtt_cleanup_hw(dev); + i915_ggtt_cleanup_hw(dev_priv); } /** @@ -1343,7 +1312,7 @@ void i915_driver_unload(struct drm_device *dev) i915_destroy_error_state(dev); /* Flush any outstanding unpin_work. 
*/ - flush_workqueue(dev_priv->wq); + drain_workqueue(dev_priv->wq); intel_guc_fini(dev); i915_gem_fini(dev); @@ -1458,8 +1427,6 @@ static int i915_drm_suspend(struct drm_device *dev) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev_priv); - intel_display_suspend(dev); intel_dp_mst_suspend(dev); @@ -1586,17 +1553,16 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(dev_priv); - ret = i915_ggtt_enable_hw(dev); + ret = i915_ggtt_enable_hw(dev_priv); if (ret) DRM_ERROR("failed to re-enable GGTT\n"); intel_csr_ucode_resume(dev_priv); - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); + i915_gem_resume(dev); i915_restore_state(dev); + intel_pps_unlock_regs_wa(dev_priv); intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev); @@ -1652,6 +1618,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_opregion_notify_adapter(dev_priv, PCI_D0); + intel_autoenable_gt_powersave(dev_priv); drm_kms_helper_poll_enable(dev); enable_rpm_wakeref_asserts(dev_priv); @@ -1778,8 +1745,6 @@ int i915_reset(struct drm_i915_private *dev_priv) unsigned reset_counter; int ret; - intel_reset_gt_powersave(dev_priv); - mutex_lock(&dev->struct_mutex); /* Clear any previous failed attempts at recovery. Time to try again. */ @@ -1835,8 +1800,7 @@ int i915_reset(struct drm_i915_private *dev_priv) * previous concerns that it doesn't respond well to some forms * of re-init after reset. */ - if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev_priv); + intel_autoenable_gt_powersave(dev_priv); return 0; @@ -2462,7 +2426,6 @@ static int intel_runtime_resume(struct device *device) * we can do is to hope that things will still work (and disable RPM). */ i915_gem_init_swizzling(dev); - gen6_update_ring_freq(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); @@ -2618,6 +2581,7 @@ static struct drm_driver driver = { .postclose = i915_driver_postclose, .set_busid = drm_pci_set_busid, + .gem_close_object = i915_gem_close_object, .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21f939074abc..e6069057eb98 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -61,6 +61,7 @@ #include "i915_gem.h" #include "i915_gem_gtt.h" #include "i915_gem_render_state.h" +#include "i915_gem_request.h" #include "intel_gvt.h" @@ -69,7 +70,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160711" +#define DRIVER_DATE "20160822" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -401,7 +402,7 @@ struct drm_i915_file_private { unsigned boosts; } rps; - unsigned int bsd_ring; + unsigned int bsd_engine; }; /* Used by dp and fdi links */ @@ -431,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes, #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 -#define WATCH_LISTS 0 - struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -456,15 +455,21 @@ struct intel_opregion { struct intel_overlay; struct intel_overlay_error_state; -#define I915_FENCE_REG_NONE -1 -#define I915_MAX_NUM_FENCES 32 -/* 32 fences + sign bit for FENCE_REG_NONE */ -#define I915_MAX_NUM_FENCE_BITS 6 - struct drm_i915_fence_reg { - struct list_head lru_list; - struct drm_i915_gem_object *obj; + struct list_head link; + struct drm_i915_private *i915; + struct i915_vma *vma; int pin_count; + int id; + /** + * Whether the tiling parameters for the 
currently + * associated fence register have changed. Note that + * for the purposes of tracking tiling changes we also + * treat the unfenced register, the register slot that + * the object occupies whilst it executes a fenced + * command (such as BLT on gen2/3), as a "fence". + */ + bool dirty; }; struct sdvo_device_mapping { @@ -476,130 +481,6 @@ struct sdvo_device_mapping { u8 ddc_pin; }; -struct intel_display_error_state; - -struct drm_i915_error_state { - struct kref ref; - struct timeval time; - - char error_msg[128]; - bool simulated; - int iommu; - u32 reset_count; - u32 suspend_count; - - /* Generic register state */ - u32 eir; - u32 pgtbl_er; - u32 ier; - u32 gtier[4]; - u32 ccid; - u32 derrmr; - u32 forcewake; - u32 error; /* gen6+ */ - u32 err_int; /* gen7 */ - u32 fault_data0; /* gen8, gen9 */ - u32 fault_data1; /* gen8, gen9 */ - u32 done_reg; - u32 gac_eco; - u32 gam_ecochk; - u32 gab_ctl; - u32 gfx_mode; - u32 extra_instdone[I915_NUM_INSTDONE_REG]; - u64 fence[I915_MAX_NUM_FENCES]; - struct intel_overlay_error_state *overlay; - struct intel_display_error_state *display; - struct drm_i915_error_object *semaphore_obj; - - struct drm_i915_error_ring { - bool valid; - /* Software tracked state */ - bool waiting; - int num_waiters; - int hangcheck_score; - enum intel_ring_hangcheck_action hangcheck_action; - int num_requests; - - /* our own tracking of ring head and tail */ - u32 cpu_ring_head; - u32 cpu_ring_tail; - - u32 last_seqno; - u32 semaphore_seqno[I915_NUM_ENGINES - 1]; - - /* Register state */ - u32 start; - u32 tail; - u32 head; - u32 ctl; - u32 hws; - u32 ipeir; - u32 ipehr; - u32 instdone; - u32 bbstate; - u32 instpm; - u32 instps; - u32 seqno; - u64 bbaddr; - u64 acthd; - u32 fault_reg; - u64 faddr; - u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; - - struct drm_i915_error_object { - int page_count; - u64 gtt_offset; - u32 *pages[0]; - } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; - - struct drm_i915_error_object *wa_ctx; - - struct drm_i915_error_request { - long jiffies; - u32 seqno; - u32 tail; - } *requests; - - struct drm_i915_error_waiter { - char comm[TASK_COMM_LEN]; - pid_t pid; - u32 seqno; - } *waiters; - - struct { - u32 gfx_mode; - union { - u64 pdp[4]; - u32 pp_dir_base; - }; - } vm_info; - - pid_t pid; - char comm[TASK_COMM_LEN]; - } ring[I915_NUM_ENGINES]; - - struct drm_i915_error_buffer { - u32 size; - u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; - u64 gtt_offset; - u32 read_domains; - u32 write_domain; - s32 fence_reg:I915_MAX_NUM_FENCE_BITS; - s32 pinned:2; - u32 tiling:2; - u32 dirty:1; - u32 purgeable:1; - u32 userptr:1; - s32 ring:4; - u32 cache_level:3; - } **active_bo, **pinned_bo; - - u32 *active_bo_count, *pinned_bo_count; - u32 vm_count; -}; - struct intel_connector; struct intel_encoder; struct intel_crtc_state; @@ -794,6 +675,7 @@ struct intel_device_info { u8 gen; u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ + u8 num_rings; DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; @@ -823,6 +705,134 @@ struct intel_device_info { #undef DEFINE_FLAG #undef SEP_SEMICOLON +struct intel_display_error_state; + +struct drm_i915_error_state { + struct kref ref; + struct timeval time; + + char error_msg[128]; + bool simulated; + int iommu; + u32 reset_count; + u32 suspend_count; + struct intel_device_info device_info; + + /* Generic register state */ + u32 eir; + u32 pgtbl_er; 
+ u32 ier; + u32 gtier[4]; + u32 ccid; + u32 derrmr; + u32 forcewake; + u32 error; /* gen6+ */ + u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ + u32 done_reg; + u32 gac_eco; + u32 gam_ecochk; + u32 gab_ctl; + u32 gfx_mode; + u32 extra_instdone[I915_NUM_INSTDONE_REG]; + u64 fence[I915_MAX_NUM_FENCES]; + struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; + struct drm_i915_error_object *semaphore; + + struct drm_i915_error_engine { + int engine_id; + /* Software tracked state */ + bool waiting; + int num_waiters; + int hangcheck_score; + enum intel_engine_hangcheck_action hangcheck_action; + struct i915_address_space *vm; + int num_requests; + + /* our own tracking of ring head and tail */ + u32 cpu_ring_head; + u32 cpu_ring_tail; + + u32 last_seqno; + u32 semaphore_seqno[I915_NUM_ENGINES - 1]; + + /* Register state */ + u32 start; + u32 tail; + u32 head; + u32 ctl; + u32 mode; + u32 hws; + u32 ipeir; + u32 ipehr; + u32 instdone; + u32 bbstate; + u32 instpm; + u32 instps; + u32 seqno; + u64 bbaddr; + u64 acthd; + u32 fault_reg; + u64 faddr; + u32 rc_psmi; /* sleep state */ + u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; + + struct drm_i915_error_object { + int page_count; + u64 gtt_offset; + u64 gtt_size; + u32 *pages[0]; + } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; + + struct drm_i915_error_object *wa_ctx; + + struct drm_i915_error_request { + long jiffies; + pid_t pid; + u32 seqno; + u32 head; + u32 tail; + } *requests; + + struct drm_i915_error_waiter { + char comm[TASK_COMM_LEN]; + pid_t pid; + u32 seqno; + } *waiters; + + struct { + u32 gfx_mode; + union { + u64 pdp[4]; + u32 pp_dir_base; + }; + } vm_info; + + pid_t pid; + char comm[TASK_COMM_LEN]; + } engine[I915_NUM_ENGINES]; + + struct drm_i915_error_buffer { + u32 size; + u32 name; + u32 rseqno[I915_NUM_ENGINES], wseqno; + u64 gtt_offset; + u32 read_domains; + u32 write_domain; + s32 fence_reg:I915_MAX_NUM_FENCE_BITS; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + u32 userptr:1; + s32 engine:4; + u32 cache_level:3; + } *active_bo[I915_NUM_ENGINES], *pinned_bo; + u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; + struct i915_address_space *active_vm[I915_NUM_ENGINES]; +}; + enum i915_cache_level { I915_CACHE_NONE = 0, I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ @@ -879,22 +889,23 @@ struct i915_gem_context { struct drm_i915_private *i915; struct drm_i915_file_private *file_priv; struct i915_hw_ppgtt *ppgtt; + struct pid *pid; struct i915_ctx_hang_stats hang_stats; - /* Unique identifier for this context, used by the hw for tracking */ unsigned long flags; #define CONTEXT_NO_ZEROMAP BIT(0) #define CONTEXT_NO_ERROR_CAPTURE BIT(1) - unsigned hw_id; + + /* Unique identifier for this context, used by the hw for tracking */ + unsigned int hw_id; u32 user_handle; u32 ggtt_alignment; struct intel_context { - struct drm_i915_gem_object *state; - struct intel_ringbuffer *ringbuf; - struct i915_vma *lrc_vma; + struct i915_vma *state; + struct intel_ring *ring; uint32_t *lrc_reg_state; u64 lrc_desc; int pin_count; @@ -908,6 +919,7 @@ struct i915_gem_context { struct list_head link; u8 remap_slice; + bool closed:1; }; enum fb_op_origin { @@ -1061,13 +1073,6 @@ struct intel_gmbus { struct i915_suspend_saved_registers { u32 saveDSPARB; - u32 saveLVDS; - u32 savePP_ON_DELAYS; - u32 savePP_OFF_DELAYS; - u32 savePP_ON; - u32 savePP_OFF; - u32 savePP_CONTROL; - u32 savePP_DIVISOR; u32 saveFBC_CONTROL; u32 
saveCACHE_MODE_0; u32 saveMI_ARB_STATE; @@ -1173,6 +1178,7 @@ struct intel_gen6_power_mgmt { u8 max_freq_softlimit; /* Max frequency permitted by the driver */ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 boost_freq; /* Frequency to request when wait boosting */ u8 idle_freq; /* Frequency to request when we are idle */ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ @@ -1190,11 +1196,9 @@ struct intel_gen6_power_mgmt { bool client_boost; bool enabled; - struct delayed_work delayed_resume_work; + struct delayed_work autoenable_work; unsigned boosts; - struct intel_rps_client semaphores, mmioflips; - /* manual wa residency calculations */ struct intel_rps_ei up_ei, down_ei; @@ -1319,7 +1323,6 @@ struct i915_gem_mm { struct notifier_block oom_notifier; struct notifier_block vmap_notifier; struct shrinker shrinker; - bool shrinker_no_lock_stealing; /** LRU list of objects with fence regs on them. */ struct list_head fence_list; @@ -1331,7 +1334,7 @@ struct i915_gem_mm { bool interruptible; /* the indicator for dispatch video commands on two BSD rings */ - unsigned int bsd_ring_dispatch_index; + unsigned int bsd_engine_dispatch_index; /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; @@ -1670,7 +1673,7 @@ struct intel_pipe_crc { }; struct i915_frontbuffer_tracking { - struct mutex lock; + spinlock_t lock; /* * Tracking bits for delayed frontbuffer flushing du to gpu activity or @@ -1705,18 +1708,6 @@ struct i915_virtual_gpu { bool active; }; -struct i915_execbuffer_params { - struct drm_device *dev; - struct drm_file *file; - uint32_t dispatch_flags; - uint32_t args_batch_start_offset; - uint64_t batch_obj_vm_offset; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *batch_obj; - struct i915_gem_context *ctx; - struct drm_i915_gem_request *request; -}; - /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; @@ -1763,13 +1754,15 @@ struct drm_i915_private { uint32_t psr_mmio_base; + uint32_t pps_mmio_base; + wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; struct i915_gem_context *kernel_context; struct intel_engine_cs engine[I915_NUM_ENGINES]; - struct drm_i915_gem_object *semaphore_obj; - uint32_t last_seqno, next_seqno; + struct i915_vma *semaphore; + u32 next_seqno; struct drm_dma_handle *status_page_dmah; struct resource mch_res; @@ -1854,6 +1847,7 @@ struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; struct drm_atomic_state *modeset_restore_state; + struct drm_modeset_acquire_ctx reset_ctx; struct list_head vm_list; /* Global list of all address spaces */ struct i915_ggtt ggtt; /* VM representing the global address space */ @@ -2016,12 +2010,7 @@ struct drm_i915_private { /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { - int (*execbuf_submit)(struct i915_execbuffer_params *params, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas); - int (*init_engines)(struct drm_device *dev); void (*cleanup_engine)(struct intel_engine_cs *engine); - void (*stop_engine)(struct intel_engine_cs *engine); /** * Is the GPU currently considered idle, or busy executing @@ -2144,8 +2133,6 @@ struct drm_i915_gem_object_ops { */ #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 -#define INTEL_FRONTBUFFER_BITS \ - (INTEL_FRONTBUFFER_BITS_PER_PIPE * 
I915_MAX_PIPES) #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) #define INTEL_FRONTBUFFER_CURSOR(pipe) \ @@ -2169,18 +2156,21 @@ struct drm_i915_gem_object { struct drm_mm_node *stolen; struct list_head global_list; - struct list_head engine_list[I915_NUM_ENGINES]; /** Used in execbuf to temporarily hold a ref */ struct list_head obj_exec_link; struct list_head batch_pool_link; + unsigned long flags; /** * This is set if the object is on the active lists (has pending * rendering and so a non-zero seqno), and is not set if it i s on * inactive (ready to be unbound) list. */ - unsigned int active:I915_NUM_ENGINES; +#define I915_BO_ACTIVE_SHIFT 0 +#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1) +#define __I915_BO_ACTIVE(bo) \ + ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK) /** * This is set if the object has been written to since last bound @@ -2189,37 +2179,11 @@ struct drm_i915_gem_object { unsigned int dirty:1; /** - * Fence register bits (if any) for this object. Will be set - * as needed when mapped into the GTT. - * Protected by dev->struct_mutex. - */ - signed int fence_reg:I915_MAX_NUM_FENCE_BITS; - - /** * Advice: are the backing pages purgeable? */ unsigned int madv:2; /** - * Current tiling mode for the object. - */ - unsigned int tiling_mode:2; - /** - * Whether the tiling parameters for the currently associated fence - * register have changed. Note that for the purposes of tracking - * tiling changes we also treat the unfenced register, the register - * slot that the object occupies whilst it executes a fenced - * command (such as BLT on gen2/3), as a "fence". - */ - unsigned int fence_dirty:1; - - /** - * Is the object at the current location in the gtt mappable and - * fenceable? Used to avoid costly recalculations. - */ - unsigned int map_and_fenceable:1; - - /** * Whether the current gtt mapping needs to be mappable (and isn't just * mappable by accident). Track pin and fault separate for a more * accurate mappable working set. @@ -2234,9 +2198,17 @@ struct drm_i915_gem_object { unsigned int cache_level:3; unsigned int cache_dirty:1; - unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; + atomic_t frontbuffer_bits; + unsigned int frontbuffer_ggtt_origin; /* write once */ + + /** Current tiling stride for the object, if it's tiled. */ + unsigned int tiling_and_stride; +#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ +#define TILING_MASK (FENCE_MINIMUM_STRIDE-1) +#define STRIDE_MASK (~TILING_MASK) - unsigned int has_wc_mmap; + /** Count of VMA actually bound by this object */ + unsigned int bind_count; unsigned int pin_display; struct sg_table *pages; @@ -2256,14 +2228,9 @@ struct drm_i915_gem_object { * requests on one ring where the write request is older than the * read request. This allows for the CPU to read from an active * buffer by only waiting for the write to complete. - * */ - struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; - struct drm_i915_gem_request *last_write_req; - /** Breadcrumb of last fenced GPU access to the buffer. */ - struct drm_i915_gem_request *last_fenced_req; - - /** Current tiling stride for the object, if it's tiled. */ - uint32_t stride; + */ + struct i915_gem_active last_read[I915_NUM_ENGINES]; + struct i915_gem_active last_write; /** References from framebuffers, locks out tiling changes. 
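/*
 * Editor's illustrative sketch (not part of the patch): the hunk above packs
 * a per-engine "active" bitmask into the low bits of obj->flags via
 * I915_BO_ACTIVE_SHIFT/MASK. The standalone userspace demo below shows the
 * same bit bookkeeping; NUM_ENGINES and the struct/function names here are
 * invented for the demo and are not the driver's definitions.
 */
#include <assert.h>
#include <stdio.h>

#define NUM_ENGINES		5	/* assumed engine count, demo only */
#define BO_ACTIVE_SHIFT		0
#define BO_ACTIVE_MASK		((1UL << NUM_ENGINES) - 1)
#define BIT(n)			(1UL << (n))

struct demo_bo { unsigned long flags; };

static unsigned long bo_get_active(const struct demo_bo *bo)
{
	return (bo->flags >> BO_ACTIVE_SHIFT) & BO_ACTIVE_MASK;
}

static void bo_set_active(struct demo_bo *bo, int engine)
{
	bo->flags |= BIT(engine + BO_ACTIVE_SHIFT);
}

static void bo_clear_active(struct demo_bo *bo, int engine)
{
	bo->flags &= ~BIT(engine + BO_ACTIVE_SHIFT);
}

int main(void)
{
	struct demo_bo bo = { .flags = 0 };

	bo_set_active(&bo, 0);	/* e.g. render */
	bo_set_active(&bo, 2);	/* e.g. blitter */
	assert(bo_get_active(&bo) == (BIT(0) | BIT(2)));

	bo_clear_active(&bo, 0);
	printf("active mask: %#lx\n", bo_get_active(&bo));	/* prints 0x4 */
	return 0;
}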
*/ unsigned long framebuffer_references; @@ -2287,7 +2254,56 @@ struct drm_i915_gem_object { } userptr; }; }; -#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) + +static inline struct drm_i915_gem_object * +to_intel_bo(struct drm_gem_object *gem) +{ + /* Assert that to_intel_bo(NULL) == NULL */ + BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); + + return container_of(gem, struct drm_i915_gem_object, base); +} + +static inline struct drm_i915_gem_object * +i915_gem_object_lookup(struct drm_file *file, u32 handle) +{ + return to_intel_bo(drm_gem_object_lookup(file, handle)); +} + +__deprecated +extern struct drm_gem_object * +drm_gem_object_lookup(struct drm_file *file, u32 handle); + +__attribute__((nonnull)) +static inline struct drm_i915_gem_object * +i915_gem_object_get(struct drm_i915_gem_object *obj) +{ + drm_gem_object_reference(&obj->base); + return obj; +} + +__deprecated +extern void drm_gem_object_reference(struct drm_gem_object *); + +__attribute__((nonnull)) +static inline void +i915_gem_object_put(struct drm_i915_gem_object *obj) +{ + drm_gem_object_unreference(&obj->base); +} + +__deprecated +extern void drm_gem_object_unreference(struct drm_gem_object *); + +__attribute__((nonnull)) +static inline void +i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj) +{ + drm_gem_object_unreference_unlocked(&obj->base); +} + +__deprecated +extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) @@ -2295,6 +2311,67 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; } +static inline unsigned long +i915_gem_object_get_active(const struct drm_i915_gem_object *obj) +{ + return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK; +} + +static inline bool +i915_gem_object_is_active(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_get_active(obj); +} + +static inline void +i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine) +{ + obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT); +} + +static inline void +i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine) +{ + obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT); +} + +static inline bool +i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj, + int engine) +{ + return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT); +} + +static inline unsigned int +i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & TILING_MASK; +} + +static inline bool +i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) +{ + return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; +} + +static inline unsigned int +i915_gem_object_get_stride(struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & STRIDE_MASK; +} + +static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) +{ + i915_gem_object_get(vma->obj); + return vma; +} + +static inline void i915_vma_put(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + i915_gem_object_put(vma->obj); +} + /* * Optimised SGL iterator for GEM objects */ @@ -2365,171 +2442,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) -/** - * Request queue structure. 
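/*
 * Editor's illustrative sketch (not part of the patch): tiling_and_stride
 * above folds the tiling mode into the low bits of the stride, relying on
 * fence-legal strides being multiples of FENCE_MINIMUM_STRIDE (128 bytes).
 * The demo below uses locally defined tiling values that merely mirror
 * I915_TILING_NONE/X/Y; only the masking trick itself comes from the patch.
 */
#include <assert.h>
#include <stdio.h>

#define FENCE_MINIMUM_STRIDE	128
#define TILING_MASK		(FENCE_MINIMUM_STRIDE - 1)
#define STRIDE_MASK		(~TILING_MASK)

enum { TILING_NONE = 0, TILING_X = 1, TILING_Y = 2 };	/* demo values */

static unsigned int pack_tiling_and_stride(unsigned int stride,
					    unsigned int tiling)
{
	assert((stride & TILING_MASK) == 0);	/* stride must be 128B aligned */
	return stride | tiling;
}

int main(void)
{
	unsigned int tiling_and_stride = pack_tiling_and_stride(4096, TILING_X);

	printf("tiling=%u stride=%u\n",
	       tiling_and_stride & TILING_MASK,		/* -> 1 (X tiled) */
	       tiling_and_stride & STRIDE_MASK);	/* -> 4096 */
	return 0;
}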
- * - * The request queue allows us to note sequence numbers that have been emitted - * and may be associated with active buffers to be retired. - * - * By keeping this list, we can avoid having to do questionable sequence - * number comparisons on buffer last_read|write_seqno. It also allows an - * emission time to be associated with the request for tracking how far ahead - * of the GPU the submission is. - * - * The requests are reference counted, so upon creation they should have an - * initial reference taken using kref_init - */ -struct drm_i915_gem_request { - struct kref ref; - - /** On Which ring this request was generated */ - struct drm_i915_private *i915; - struct intel_engine_cs *engine; - struct intel_signal_node signaling; - - /** GEM sequence number associated with the previous request, - * when the HWS breadcrumb is equal to this the GPU is processing - * this request. - */ - u32 previous_seqno; - - /** GEM sequence number associated with this request, - * when the HWS breadcrumb is equal or greater than this the GPU - * has finished processing this request. - */ - u32 seqno; - - /** Position in the ringbuffer of the start of the request */ - u32 head; - - /** - * Position in the ringbuffer of the start of the postfix. - * This is required to calculate the maximum available ringbuffer - * space without overwriting the postfix. - */ - u32 postfix; - - /** Position in the ringbuffer of the end of the whole request */ - u32 tail; - - /** Preallocate space in the ringbuffer for the emitting the request */ - u32 reserved_space; - - /** - * Context and ring buffer related to this request - * Contexts are refcounted, so when this request is associated with a - * context, we must increment the context's refcount, to guarantee that - * it persists while any request is linked to it. Requests themselves - * are also refcounted, so the request will only be freed when the last - * reference to it is dismissed, and the code in - * i915_gem_request_free() will then decrement the refcount on the - * context. - */ - struct i915_gem_context *ctx; - struct intel_ringbuffer *ringbuf; - - /** - * Context related to the previous request. - * As the contexts are accessed by the hardware until the switch is - * completed to a new context, the hardware may still be writing - * to the context object after the breadcrumb is visible. We must - * not unpin/unbind/prune that object whilst still active and so - * we keep the previous context pinned until the following (this) - * request is retired. - */ - struct i915_gem_context *previous_context; - - /** Batch buffer related to this request if any (used for - error state dump only) */ - struct drm_i915_gem_object *batch_obj; - - /** Time at which this request was emitted, in jiffies. */ - unsigned long emitted_jiffies; - - /** global list entry for this request */ - struct list_head list; - - struct drm_i915_file_private *file_priv; - /** file_priv list entry for this request */ - struct list_head client_list; - - /** process identifier submitting this request */ - struct pid *pid; - - /** - * The ELSP only accepts two elements at a time, so we queue - * context/tail pairs on a given queue (ring->execlist_queue) until the - * hardware is available. The queue serves a double purpose: we also use - * it to keep track of the up to 2 contexts currently in the hardware - * (usually one in execution and the other queued up by the GPU): We - * only remove elements from the head of the queue when the hardware - * informs us that an element has been completed. 
- * - * All accesses to the queue are mediated by a spinlock - * (ring->execlist_lock). - */ - - /** Execlist link in the submission queue.*/ - struct list_head execlist_link; - - /** Execlists no. of times this request has been sent to the ELSP */ - int elsp_submitted; - - /** Execlists context hardware id. */ - unsigned ctx_hw_id; -}; - -struct drm_i915_gem_request * __must_check -i915_gem_request_alloc(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); -void i915_gem_request_free(struct kref *req_ref); -int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, - struct drm_file *file); - -static inline uint32_t -i915_gem_request_get_seqno(struct drm_i915_gem_request *req) -{ - return req ? req->seqno : 0; -} - -static inline struct intel_engine_cs * -i915_gem_request_get_engine(struct drm_i915_gem_request *req) -{ - return req ? req->engine : NULL; -} - -static inline struct drm_i915_gem_request * -i915_gem_request_reference(struct drm_i915_gem_request *req) -{ - if (req) - kref_get(&req->ref); - return req; -} - -static inline void -i915_gem_request_unreference(struct drm_i915_gem_request *req) -{ - kref_put(&req->ref, i915_gem_request_free); -} - -static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, - struct drm_i915_gem_request *src) -{ - if (src) - i915_gem_request_reference(src); - - if (*pdst) - i915_gem_request_unreference(*pdst); - - *pdst = src; -} - -/* - * XXX: i915_gem_request_completed should be here but currently needs the - * definition of i915_seqno_passed() which is below. It will be moved in - * a later patch when the call to i915_seqno_passed() is obsoleted... - */ - /* * A command that requires special handling by the command parser. */ @@ -2617,8 +2529,9 @@ struct drm_i915_cmd_descriptor { /* * A table of commands requiring special handling by the command parser. * - * Each ring has an array of tables. Each table consists of an array of command - * descriptors, which must be sorted with command opcodes in ascending order. + * Each engine has an array of tables. Each table consists of an array of + * command descriptors, which must be sorted with command opcodes in + * ascending order. 
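/*
 * Editor's illustrative sketch (not part of the patch): the request helpers
 * being removed above include an "assign" pattern that takes the reference
 * on the new pointer before dropping the reference on the old one, so a
 * self-assignment can never transiently reach a zero refcount. The tiny
 * hand-rolled refcount below is a stand-in for the kernel kref API and the
 * names are invented for the demo.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct req { int refcount; };

static struct req *req_get(struct req *r) { if (r) r->refcount++; return r; }

static void req_put(struct req *r)
{
	if (r && --r->refcount == 0) {
		printf("freeing request %p\n", (void *)r);
		free(r);
	}
}

static void req_assign(struct req **pdst, struct req *src)
{
	req_get(src);		/* take the new reference first ... */
	req_put(*pdst);		/* ... so self-assignment cannot free src */
	*pdst = src;
}

int main(void)
{
	struct req *a = calloc(1, sizeof(*a));
	struct req *slot = NULL;

	a->refcount = 1;		/* creator's reference */
	req_assign(&slot, a);		/* slot now holds its own reference */
	req_assign(&slot, a);		/* safe: new ref taken before old dropped */
	req_put(a);			/* drop creator's reference */
	assert(slot->refcount == 1);
	req_assign(&slot, NULL);	/* final put frees the request */
	return 0;
}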
*/ struct drm_i915_cmd_table { const struct drm_i915_cmd_descriptor *table; @@ -2932,6 +2845,8 @@ extern int i915_resume_switcheroo(struct drm_device *dev); int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, int enable_ppgtt); +bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); + /* i915_drv.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, @@ -3107,11 +3022,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_gem_execbuffer_move_to_active(struct list_head *vmas, - struct drm_i915_gem_request *req); -int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas); int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_execbuffer2(struct drm_device *dev, void *data, @@ -3150,47 +3060,28 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size); struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_device *dev, const void *data, size_t size); +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); -void i915_gem_vma_destroy(struct i915_vma *vma); - -/* Flags used by pin/bind&friends. */ -#define PIN_MAPPABLE (1<<0) -#define PIN_NONBLOCK (1<<1) -#define PIN_GLOBAL (1<<2) -#define PIN_OFFSET_BIAS (1<<3) -#define PIN_USER (1<<4) -#define PIN_UPDATE (1<<5) -#define PIN_ZONE_4G (1<<6) -#define PIN_HIGH (1<<7) -#define PIN_OFFSET_FIXED (1<<8) -#define PIN_OFFSET_MASK (~4095) -int __must_check -i915_gem_object_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags); -int __must_check + +struct i915_vma * __must_check i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, - uint32_t alignment, - uint64_t flags); + u64 size, + u64 alignment, + u64 flags); int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); int __must_check i915_vma_unbind(struct i915_vma *vma); -/* - * BEWARE: Do not use the function below unless you can _absolutely_ - * _guarantee_ VMA in question is _not in use_ anywhere. 
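/*
 * Editor's illustrative sketch (not part of the patch): the comment above
 * requires command-descriptor tables to be sorted by opcode, presumably so
 * lookups can use a binary search rather than a linear scan. The demo below
 * shows that idea with libc bsearch(); the descriptor layout and opcodes are
 * invented and do not match the real i915 command parser structures.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd_descriptor {
	uint32_t opcode;
	unsigned int length;	/* fixed command length, demo only */
};

/* Must stay sorted by opcode, mirroring the requirement stated above. */
static const struct demo_cmd_descriptor demo_table[] = {
	{ 0x02, 1 },
	{ 0x11, 2 },
	{ 0x22, 3 },
	{ 0x31, 5 },
};

static int cmp_opcode(const void *key, const void *elt)
{
	uint32_t opcode = *(const uint32_t *)key;
	const struct demo_cmd_descriptor *desc = elt;

	return (opcode > desc->opcode) - (opcode < desc->opcode);
}

int main(void)
{
	uint32_t opcode = 0x22;
	const struct demo_cmd_descriptor *desc;

	desc = bsearch(&opcode, demo_table,
		       sizeof(demo_table) / sizeof(demo_table[0]),
		       sizeof(demo_table[0]), cmp_opcode);
	if (desc)
		printf("opcode %#x -> length %u\n", opcode, desc->length);
	return 0;
}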
- */ -int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); +void i915_vma_close(struct i915_vma *vma); +void i915_vma_destroy(struct i915_vma *vma); + +int i915_gem_object_unbind(struct drm_i915_gem_object *obj); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); -int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - int *needs_clflush); - int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline int __sg_page_count(struct scatterlist *sg) @@ -3250,13 +3141,20 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) obj->pages_pin_count--; } +enum i915_map_type { + I915_MAP_WB = 0, + I915_MAP_WC, +}; + /** * i915_gem_object_pin_map - return a contiguous mapping of the entire object * @obj - the object to map into kernel address space + * @type - the type of mapping, used to select pgprot_t * * Calls i915_gem_object_pin_pages() to prevent reaping of the object's * pages and then returns a contiguous mapping of the backing storage into - * the kernel address space. + * the kernel address space. Based on the @type of mapping, the PTE will be + * set to either WriteBack or WriteCombine (via pgprot_t). * * The caller must hold the struct_mutex, and is responsible for calling * i915_gem_object_unpin_map() when the mapping is no longer required. @@ -3264,7 +3162,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) * Returns the pointer through which to access the mapped object, or an * ERR_PTR() on error. */ -void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); +void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type); /** * i915_gem_object_unpin_map - releases an earlier mapping @@ -3283,12 +3182,26 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +#define CLFLUSH_BEFORE 0x1 +#define CLFLUSH_AFTER 0x2 +#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) + +static inline void +i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_sync(struct drm_i915_gem_object *obj, - struct intel_engine_cs *to, - struct drm_i915_gem_request **to_req); + struct drm_i915_gem_request *to); void i915_vma_move_to_active(struct i915_vma *vma, - struct drm_i915_gem_request *req); + struct drm_i915_gem_request *req, + unsigned int flags); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -3299,44 +3212,12 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, struct drm_i915_gem_object *new, unsigned frontbuffer_bits); -/** - * Returns true if seq1 is later than seq2. 
- */ -static inline bool -i915_seqno_passed(uint32_t seq1, uint32_t seq2) -{ - return (int32_t)(seq1 - seq2) >= 0; -} - -static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req) -{ - return i915_seqno_passed(intel_engine_get_seqno(req->engine), - req->previous_seqno); -} - -static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req) -{ - return i915_seqno_passed(intel_engine_get_seqno(req->engine), - req->seqno); -} - -bool __i915_spin_request(const struct drm_i915_gem_request *request, - int state, unsigned long timeout_us); -static inline bool i915_spin_request(const struct drm_i915_gem_request *request, - int state, unsigned long timeout_us) -{ - return (i915_gem_request_started(request) && - __i915_spin_request(request, state, timeout_us)); -} - -int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); void i915_gem_retire_requests(struct drm_i915_private *dev_priv); -void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct i915_gpu_error *error) { @@ -3381,24 +3262,13 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) void i915_gem_reset(struct drm_device *dev); bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); -int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev); -int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv); +int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, + bool interruptible); int __must_check i915_gem_suspend(struct drm_device *dev); -void __i915_add_request(struct drm_i915_gem_request *req, - struct drm_i915_gem_object *batch_obj, - bool flush_caches); -#define i915_add_request(req) \ - __i915_add_request(req, NULL, true) -#define i915_add_request_no_flush(req) \ - __i915_add_request(req, NULL, false) -int __i915_wait_request(struct drm_i915_gem_request *req, - bool interruptible, - s64 *timeout, - struct intel_rps_client *rps); -int __must_check i915_wait_request(struct drm_i915_gem_request *req); +void i915_gem_resume(struct drm_device *dev); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, @@ -3408,22 +3278,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); -int __must_check +struct i915_vma * __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view); -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); +void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); int i915_gem_open(struct drm_device *dev, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); -uint32_t -i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); -uint32_t 
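/*
 * Editor's illustrative sketch (not part of the patch): i915_seqno_passed(),
 * removed from this header above, compares sequence numbers by casting the
 * unsigned difference to a signed 32-bit value, which keeps the ordering
 * correct across a u32 wraparound as long as the two seqnos are less than
 * 2^31 apart. The standalone demo below exercises exactly that property.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 99));		/* ordinary case */
	assert(!seqno_passed(99, 100));
	assert(seqno_passed(5, 0xfffffff0u));	/* 5 is "after" a wrapped seqno */
	printf("wrap-safe seqno comparisons hold\n");
	return 0;
}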
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, - int tiling_mode, bool fenced); +u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size, + int tiling_mode); +u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, + int tiling_mode, bool fenced); int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level); @@ -3434,86 +3302,82 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view); -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm); -static inline u64 -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) -{ - return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); -} - -bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view); -bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm); - struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); -struct i915_vma * -i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); + struct i915_address_space *vm, + const struct i915_ggtt_view *view); struct i915_vma * i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); -struct i915_vma * -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); - -static inline struct i915_vma * -i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) -{ - return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); -} -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); + struct i915_address_space *vm, + const struct i915_ggtt_view *view); -/* Some GGTT VM helpers */ static inline struct i915_hw_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { return container_of(vm, struct i915_hw_ppgtt, base); } +static inline struct i915_vma * +i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) +{ + return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); +} -static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) +static inline unsigned long +i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { - return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); + return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); } -unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj); +/* i915_gem_fence.c */ +int __must_check i915_vma_get_fence(struct i915_vma *vma); +int __must_check i915_vma_put_fence(struct i915_vma *vma); -static inline int __must_check -i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, - uint32_t alignment, - unsigned flags) +/** + * i915_vma_pin_fence - pin fencing state + * @vma: vma to pin fencing for + * + * This pins the fencing state (whether tiled or untiled) to make sure the + * vma (and its object) is ready to be used as a scanout target. Fencing + * status must be synchronize first by calling i915_vma_get_fence(): + * + * The resulting fence pin reference must be released again with + * i915_vma_unpin_fence(). 
+ * + * Returns: + * + * True if the vma has a fence, false otherwise. + */ +static inline bool +i915_vma_pin_fence(struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - return i915_gem_object_pin(obj, &ggtt->base, - alignment, flags | PIN_GLOBAL); + if (vma->fence) { + vma->fence->pin_count++; + return true; + } else + return false; } -void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); +/** + * i915_vma_unpin_fence - unpin fencing state + * @vma: vma to unpin fencing for + * + * This releases the fence pin reference acquired through + * i915_vma_pin_fence. It will handle both objects with and without an + * attached fence correctly, callers do not need to distinguish this. + */ static inline void -i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +i915_vma_unpin_fence(struct i915_vma *vma) { - i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); + if (vma->fence) { + GEM_BUG_ON(vma->fence->pin_count <= 0); + vma->fence->pin_count--; + } } -/* i915_gem_fence.c */ -int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); -int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); - -bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); -void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); - void i915_gem_restore_fences(struct drm_device *dev); void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); @@ -3528,6 +3392,7 @@ void i915_gem_context_reset(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); +int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); void i915_gem_context_free(struct kref *ctx_ref); struct drm_i915_gem_object * i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); @@ -3548,12 +3413,14 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) return ctx; } -static inline void i915_gem_context_reference(struct i915_gem_context *ctx) +static inline struct i915_gem_context * +i915_gem_context_get(struct i915_gem_context *ctx) { kref_get(&ctx->ref); + return ctx; } -static inline void i915_gem_context_unreference(struct i915_gem_context *ctx) +static inline void i915_gem_context_put(struct i915_gem_context *ctx) { lockdep_assert_held(&ctx->i915->drm.struct_mutex); kref_put(&ctx->ref, i915_gem_context_free); @@ -3576,13 +3443,10 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct drm_device *dev, - struct i915_address_space *vm, - int min_size, - unsigned alignment, +int __must_check i915_gem_evict_something(struct i915_address_space *vm, + u64 min_size, u64 alignment, unsigned cache_level, - unsigned long start, - unsigned long end, + u64 start, u64 end, unsigned flags); int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); @@ -3590,6 +3454,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { + wmb(); if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3634,16 +3499,9 @@ static inline bool 
i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec struct drm_i915_private *dev_priv = to_i915(obj->base.dev); return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && - obj->tiling_mode != I915_TILING_NONE; + i915_gem_object_is_tiled(obj); } -/* i915_gem_debug.c */ -#if WATCH_LISTS -int i915_verify_lists(struct drm_device *dev); -#else -#define i915_verify_lists(dev) 0 -#endif - /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); @@ -3684,15 +3542,15 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); -void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); -bool i915_needs_cmd_parser(struct intel_engine_cs *engine); -int i915_parse_cmds(struct intel_engine_cs *engine, - struct drm_i915_gem_object *batch_obj, - struct drm_i915_gem_object *shadow_batch_obj, - u32 batch_start_offset, - u32 batch_len, - bool is_master); +void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); +bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_cmd_parser(struct intel_engine_cs *engine, + struct drm_i915_gem_object *batch_obj, + struct drm_i915_gem_object *shadow_batch_obj, + u32 batch_start_offset, + u32 batch_len, + bool is_master); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); @@ -3800,7 +3658,6 @@ extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); -extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); @@ -4017,7 +3874,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) * is woken. */ if (engine->irq_seqno_barrier && - READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current && + rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current && cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) { struct task_struct *tsk; @@ -4042,7 +3899,7 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) * irq_posted == false but we are still running). 
*/ rcu_read_lock(); - tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh); if (tsk && tsk != current) /* Note that if the bottom-half is changed as we * are sending the wake-up, the new bottom-half will @@ -4071,4 +3928,32 @@ static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req) return false; } +void i915_memcpy_init_early(struct drm_i915_private *dev_priv); +bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len); + +/* i915_mm.c */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); + +#define ptr_mask_bits(ptr) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (typeof(ptr))(__v & PAGE_MASK); \ +}) + +#define ptr_unpack_bits(ptr, bits) ({ \ + unsigned long __v = (unsigned long)(ptr); \ + (bits) = __v & ~PAGE_MASK; \ + (typeof(ptr))(__v & PAGE_MASK); \ +}) + +#define ptr_pack_bits(ptr, bits) \ + ((typeof(ptr))((unsigned long)(ptr) | (bits))) + +#define fetch_and_zero(ptr) ({ \ + typeof(*ptr) __T = *(ptr); \ + *(ptr) = (typeof(*ptr))0; \ + __T; \ +}) + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 11681501d7b1..04607d4115d6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -29,10 +29,13 @@ #include <drm/drm_vma_manager.h> #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_gem_dmabuf.h" #include "i915_vgpu.h" #include "i915_trace.h" #include "intel_drv.h" +#include "intel_frontbuffer.h" #include "intel_mocs.h" +#include <linux/reservation.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/swap.h> @@ -41,10 +44,6 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); -static void -i915_gem_object_retire__write(struct drm_i915_gem_object *obj); -static void -i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring); static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) @@ -139,7 +138,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) if (ret) return ret; - WARN_ON(i915_verify_lists(dev)); return 0; } @@ -156,10 +154,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; mutex_lock(&dev->struct_mutex); list_for_each_entry(vma, &ggtt->base.active_list, vm_link) - if (vma->pin_count) + if (i915_vma_is_pinned(vma)) pinned += vma->node.size; list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) - if (vma->pin_count) + if (i915_vma_is_pinned(vma)) pinned += vma->node.size; mutex_unlock(&dev->struct_mutex); @@ -281,23 +279,128 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .release = i915_gem_object_release_phys, }; -static int -drop_pages(struct drm_i915_gem_object *obj) +int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { - struct i915_vma *vma, *next; + struct i915_vma *vma; + LIST_HEAD(still_in_list); int ret; - drm_gem_object_reference(&obj->base); - list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) - if (i915_vma_unbind(vma)) - break; + lockdep_assert_held(&obj->base.dev->struct_mutex); - ret = i915_gem_object_put_pages(obj); - drm_gem_object_unreference(&obj->base); + /* Closed vma are removed from the obj->vma_list - but they may + * still have an active binding on the object. 
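/*
 * Editor's illustrative sketch (not part of the patch): the ptr_pack_bits /
 * ptr_unpack_bits / fetch_and_zero macros added above exploit the fact that
 * page-aligned pointers have their low bits clear, so a few flag bits can
 * ride along in the same word. The demo below is userspace-only: PAGE_MASK
 * is replaced by a local DEMO_PAGE_MASK, and the GNU C statement-expression
 * and typeof extensions are used just as the kernel macros do.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))

#define ptr_mask_bits(ptr) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(typeof(ptr))(__v & DEMO_PAGE_MASK); \
})

#define ptr_unpack_bits(ptr, bits) ({ \
	unsigned long __v = (unsigned long)(ptr); \
	(bits) = __v & ~DEMO_PAGE_MASK; \
	(typeof(ptr))(__v & DEMO_PAGE_MASK); \
})

#define ptr_pack_bits(ptr, bits) \
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

#define fetch_and_zero(ptr) ({ \
	typeof(*ptr) __T = *(ptr); \
	*(ptr) = (typeof(*ptr))0; \
	__T; \
})

int main(void)
{
	void *page = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
	unsigned int bits;

	assert(page);
	void *packed = ptr_pack_bits(page, 0x3);	/* stash two flag bits */
	void *plain = ptr_unpack_bits(packed, bits);

	assert(plain == page && bits == 0x3);
	assert(ptr_mask_bits(packed) == page);

	void *taken = fetch_and_zero(&plain);		/* read and clear in one go */
	assert(taken == page && plain == NULL);

	printf("packed %p -> ptr %p, bits %#x\n", packed, taken, bits);
	free(page);
	return 0;
}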
To remove those we + * must wait for all rendering to complete to the object (as unbinding + * must anyway), and retire the requests. + */ + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + + i915_gem_retire_requests(to_i915(obj->base.dev)); + + while ((vma = list_first_entry_or_null(&obj->vma_list, + struct i915_vma, + obj_link))) { + list_move_tail(&vma->obj_link, &still_in_list); + ret = i915_vma_unbind(vma); + if (ret) + break; + } + list_splice(&still_in_list, &obj->vma_list); return ret; } +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + * @obj: i915 gem object + * @readonly: waiting for just read access or read-write access + */ +int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, + bool readonly) +{ + struct reservation_object *resv; + struct i915_gem_active *active; + unsigned long active_mask; + int idx; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (!readonly) { + active = obj->last_read; + active_mask = i915_gem_object_get_active(obj); + } else { + active_mask = 1; + active = &obj->last_write; + } + + for_each_active(active_mask, idx) { + int ret; + + ret = i915_gem_active_wait(&active[idx], + &obj->base.dev->struct_mutex); + if (ret) + return ret; + } + + resv = i915_gem_object_get_dmabuf_resv(obj); + if (resv) { + long err; + + err = reservation_object_wait_timeout_rcu(resv, !readonly, true, + MAX_SCHEDULE_TIMEOUT); + if (err < 0) + return err; + } + + return 0; +} + +/* A nonblocking variant of the above wait. Must be called prior to + * acquiring the mutex for the object, as the object state may change + * during this call. A reference must be held by the caller for the object. + */ +static __must_check int +__unsafe_wait_rendering(struct drm_i915_gem_object *obj, + struct intel_rps_client *rps, + bool readonly) +{ + struct i915_gem_active *active; + unsigned long active_mask; + int idx; + + active_mask = __I915_BO_ACTIVE(obj); + if (!active_mask) + return 0; + + if (!readonly) { + active = obj->last_read; + } else { + active_mask = 1; + active = &obj->last_write; + } + + for_each_active(active_mask, idx) { + int ret; + + ret = i915_gem_active_wait_unlocked(&active[idx], + true, NULL, rps); + if (ret) + return ret; + } + + return 0; +} + +static struct intel_rps_client *to_rps_client(struct drm_file *file) +{ + struct drm_i915_file_private *fpriv = file->driver_priv; + + return &fpriv->rps; +} + int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) @@ -318,7 +421,11 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, if (obj->base.filp == NULL) return -EINVAL; - ret = drop_pages(obj); + ret = i915_gem_object_unbind(obj); + if (ret) + return ret; + + ret = i915_gem_object_put_pages(obj); if (ret) return ret; @@ -408,7 +515,7 @@ i915_gem_create(struct drm_file *file, ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); if (ret) return ret; @@ -502,33 +609,106 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, * flush the object from the CPU cache. 
*/ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - int *needs_clflush) + unsigned int *needs_clflush) { int ret; *needs_clflush = 0; - if (WARN_ON(!i915_gem_object_has_struct_page(obj))) - return -EINVAL; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { - /* If we're not in the cpu read domain, set ourself into the gtt - * read domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will dirty the data - * anyway again before the next pread happens. */ + i915_gem_object_pin_pages(obj); + + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. + */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, obj->cache_level); - ret = i915_gem_object_wait_rendering(obj, true); + + if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) - return ret; + goto err_unpin; + + *needs_clflush = 0; } + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; +} + +int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush) +{ + int ret; + + *needs_clflush = 0; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; + ret = i915_gem_object_get_pages(obj); if (ret) return ret; i915_gem_object_pin_pages(obj); + i915_gem_object_flush_gtt_write_domain(obj); + + /* If we're not in the cpu write domain, set ourself into the + * gtt write domain and manually flush cachelines (as required). + * This optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. + */ + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) + *needs_clflush |= cpu_write_needs_clflush(obj) << 1; + + /* Same trick applies to invalidate partially written cachelines read + * before writing. 
+ */ + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) + *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev, + obj->cache_level); + + if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) + goto err_unpin; + + *needs_clflush = 0; + } + + if ((*needs_clflush & CLFLUSH_AFTER) == 0) + obj->cache_dirty = true; + + intel_fb_obj_invalidate(obj, ORIGIN_CPU); + obj->dirty = 1; + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); return ret; } @@ -638,14 +818,24 @@ i915_gem_gtt_pread(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_vma *vma; struct drm_mm_node node; char __user *user_data; uint64_t remain; uint64_t offset; int ret; - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE); - if (ret) { + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (!IS_ERR(vma)) { + node.start = i915_ggtt_offset(vma); + node.allocated = false; + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + vma = ERR_PTR(ret); + } + } + if (IS_ERR(vma)) { ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE); if (ret) goto out; @@ -657,12 +847,6 @@ i915_gem_gtt_pread(struct drm_device *dev, } i915_gem_object_pin_pages(obj); - } else { - node.start = i915_gem_obj_ggtt_offset(obj); - node.allocated = false; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; } ret = i915_gem_object_set_to_gtt_domain(obj, false); @@ -707,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev, * and write to user memory which may result into page * faults, and so we cannot perform this under struct_mutex. */ - if (slow_user_access(ggtt->mappable, page_base, + if (slow_user_access(&ggtt->mappable, page_base, page_offset, user_data, page_length, false)) { ret = -EFAULT; @@ -739,7 +923,7 @@ out_unpin: i915_gem_object_unpin_pages(obj); remove_mappable_node(&node); } else { - i915_gem_object_ggtt_unpin(obj); + i915_vma_unpin(vma); } out: return ret; @@ -760,19 +944,14 @@ i915_gem_shmem_pread(struct drm_device *dev, int needs_clflush = 0; struct sg_page_iter sg_iter; - if (!i915_gem_object_has_struct_page(obj)) - return -ENODEV; - - user_data = u64_to_user_ptr(args->data_ptr); - remain = args->size; - - obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); if (ret) return ret; + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; + remain = args->size; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, offset >> PAGE_SHIFT) { @@ -828,7 +1007,7 @@ next_page: } out: - i915_gem_object_unpin_pages(obj); + i915_gem_obj_finish_shmem_access(obj); return ret; } @@ -857,36 +1036,44 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; /* Bounds check source. 
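/*
 * Editor's illustrative sketch (not part of the patch): the new
 * prepare_shmem_read/write helpers above report their cache-maintenance
 * needs as a small flag word, CLFLUSH_BEFORE (bit 0) and CLFLUSH_AFTER
 * (bit 1). The demo below mimics that encoding; the two boolean inputs are
 * stand-ins for cpu_cache_is_coherent() and cpu_write_needs_clflush().
 */
#include <stdbool.h>
#include <stdio.h>

#define CLFLUSH_BEFORE	0x1
#define CLFLUSH_AFTER	0x2
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static unsigned int demo_prepare_write(bool cache_coherent,
				       bool write_needs_flush)
{
	unsigned int needs_clflush = 0;

	/* flush after the write if the data must reach memory for the GPU */
	needs_clflush |= write_needs_flush << 1;
	/* flush before the write to avoid stale partially-written cachelines */
	needs_clflush |= !cache_coherent;

	return needs_clflush & CLFLUSH_FLAGS;
}

int main(void)
{
	printf("LLC-coherent write:     %#x\n", demo_prepare_write(true, false));
	printf("non-coherent GTT write: %#x\n", demo_prepare_write(false, true));
	return 0;
}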
*/ if (args->offset > obj->base.size || args->size > obj->base.size - args->offset) { ret = -EINVAL; - goto out; + goto err; } trace_i915_gem_object_pread(obj, args->offset, args->size); + ret = __unsafe_wait_rendering(obj, to_rps_client(file), true); + if (ret) + goto err; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err; + ret = i915_gem_shmem_pread(dev, obj, args, file); /* pread for non shmem backed objects */ - if (ret == -EFAULT || ret == -ENODEV) + if (ret == -EFAULT || ret == -ENODEV) { + intel_runtime_pm_get(to_i915(dev)); ret = i915_gem_gtt_pread(dev, obj, args->size, args->offset, args->data_ptr); + intel_runtime_pm_put(to_i915(dev)); + } -out: - drm_gem_object_unreference(&obj->base); -unlock: + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); + + return ret; + +err: + i915_gem_object_put_unlocked(obj); return ret; } @@ -916,7 +1103,7 @@ fast_user_write(struct io_mapping *mapping, /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. - * @dev: drm device pointer + * @i915: i915 device private data * @obj: i915 gem object * @args: pwrite arguments structure * @file: drm file pointer @@ -929,17 +1116,28 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, { struct i915_ggtt *ggtt = &i915->ggtt; struct drm_device *dev = obj->base.dev; + struct i915_vma *vma; struct drm_mm_node node; uint64_t remain, offset; char __user *user_data; int ret; bool hit_slow_path = false; - if (obj->tiling_mode != I915_TILING_NONE) + if (i915_gem_object_is_tiled(obj)) return -EFAULT; - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); - if (ret) { + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | PIN_NONBLOCK); + if (!IS_ERR(vma)) { + node.start = i915_ggtt_offset(vma); + node.allocated = false; + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + vma = ERR_PTR(ret); + } + } + if (IS_ERR(vma)) { ret = insert_mappable_node(i915, &node, PAGE_SIZE); if (ret) goto out; @@ -951,19 +1149,13 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, } i915_gem_object_pin_pages(obj); - } else { - node.start = i915_gem_obj_ggtt_offset(obj); - node.allocated = false; - ret = i915_gem_object_put_fence(obj); - if (ret) - goto out_unpin; } ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) goto out_unpin; - intel_fb_obj_invalidate(obj, ORIGIN_GTT); + intel_fb_obj_invalidate(obj, ORIGIN_CPU); obj->dirty = true; user_data = u64_to_user_ptr(args->data_ptr); @@ -995,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915, * If the object is non-shmem backed, we retry again with the * path that handles page fault. 
*/ - if (fast_user_write(ggtt->mappable, page_base, + if (fast_user_write(&ggtt->mappable, page_base, page_offset, user_data, page_length)) { hit_slow_path = true; mutex_unlock(&dev->struct_mutex); - if (slow_user_access(ggtt->mappable, + if (slow_user_access(&ggtt->mappable, page_base, page_offset, user_data, page_length, true)) { @@ -1030,7 +1222,7 @@ out_flush: } } - intel_fb_obj_flush(obj, false, ORIGIN_GTT); + intel_fb_obj_flush(obj, false, ORIGIN_CPU); out_unpin: if (node.allocated) { wmb(); @@ -1040,7 +1232,7 @@ out_unpin: i915_gem_object_unpin_pages(obj); remove_mappable_node(&node); } else { - i915_gem_object_ggtt_unpin(obj); + i915_vma_unpin(vma); } out: return ret; @@ -1123,41 +1315,17 @@ i915_gem_shmem_pwrite(struct drm_device *dev, int shmem_page_offset, page_length, ret = 0; int obj_do_bit17_swizzling, page_do_bit17_swizzling; int hit_slowpath = 0; - int needs_clflush_after = 0; - int needs_clflush_before = 0; + unsigned int needs_clflush; struct sg_page_iter sg_iter; - user_data = u64_to_user_ptr(args->data_ptr); - remain = args->size; - - obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - /* If we're not in the cpu write domain, set ourself into the gtt - * write domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will use the data - * right away and we therefore have to clflush anyway. */ - needs_clflush_after = cpu_write_needs_clflush(obj); - ret = i915_gem_object_wait_rendering(obj, false); - if (ret) - return ret; - } - /* Same trick applies to invalidate partially written cachelines read - * before writing. */ - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) - needs_clflush_before = - !cpu_cache_is_coherent(dev, obj->cache_level); - - ret = i915_gem_object_get_pages(obj); + ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); if (ret) return ret; - intel_fb_obj_invalidate(obj, ORIGIN_CPU); - - i915_gem_object_pin_pages(obj); - + obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + user_data = u64_to_user_ptr(args->data_ptr); offset = args->offset; - obj->dirty = 1; + remain = args->size; for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, offset >> PAGE_SHIFT) { @@ -1181,7 +1349,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, /* If we don't overwrite a cacheline completely we need to be * careful to have up-to-date data by first clflushing. Don't * overcomplicate things and flush the entire patch. 
*/ - partial_cacheline_write = needs_clflush_before && + partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE && ((shmem_page_offset | page_length) & (boot_cpu_data.x86_clflush_size - 1)); @@ -1191,7 +1359,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ret = shmem_pwrite_fast(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, - needs_clflush_after); + needs_clflush & CLFLUSH_AFTER); if (ret == 0) goto next_page; @@ -1200,7 +1368,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, partial_cacheline_write, - needs_clflush_after); + needs_clflush & CLFLUSH_AFTER); mutex_lock(&dev->struct_mutex); @@ -1214,7 +1382,7 @@ next_page: } out: - i915_gem_object_unpin_pages(obj); + i915_gem_obj_finish_shmem_access(obj); if (hit_slowpath) { /* @@ -1222,17 +1390,15 @@ out: * cachelines in-line while writing and the object moved * out of the cpu write domain while we've dropped the lock. */ - if (!needs_clflush_after && + if (!(needs_clflush & CLFLUSH_AFTER) && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { if (i915_gem_clflush_object(obj, obj->pin_display)) - needs_clflush_after = true; + needs_clflush |= CLFLUSH_AFTER; } } - if (needs_clflush_after) + if (needs_clflush & CLFLUSH_AFTER) i915_gem_chipset_flush(to_i915(dev)); - else - obj->cache_dirty = true; intel_fb_obj_flush(obj, false, ORIGIN_CPU); return ret; @@ -1270,27 +1436,29 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -EFAULT; } - intel_runtime_pm_get(dev_priv); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto put_rpm; - - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; /* Bounds check destination. */ if (args->offset > obj->base.size || args->size > obj->base.size - args->offset) { ret = -EINVAL; - goto out; + goto err; } trace_i915_gem_object_pwrite(obj, args->offset, args->size); + ret = __unsafe_wait_rendering(obj, to_rps_client(file), false); + if (ret) + goto err; + + intel_runtime_pm_get(dev_priv); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err_rpm; + ret = -EFAULT; /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get @@ -1306,508 +1474,31 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * textures). Fallback to the shmem path in that case. */ } - if (ret == -EFAULT) { + if (ret == -EFAULT || ret == -ENOSPC) { if (obj->phys_handle) ret = i915_gem_phys_pwrite(obj, args, file); - else if (i915_gem_object_has_struct_page(obj)) - ret = i915_gem_shmem_pwrite(dev, obj, args, file); else - ret = -ENODEV; + ret = i915_gem_shmem_pwrite(dev, obj, args, file); } -out: - drm_gem_object_unreference(&obj->base); -unlock: + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); -put_rpm: intel_runtime_pm_put(dev_priv); return ret; -} - -static int -i915_gem_check_wedge(unsigned reset_counter, bool interruptible) -{ - if (__i915_terminally_wedged(reset_counter)) - return -EIO; - - if (__i915_reset_in_progress(reset_counter)) { - /* Non-interruptible callers can't handle -EAGAIN, hence return - * -EIO unconditionally for these. 
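// Illustrative sketch, not from the i915 sources: the partial-cacheline test
// used by the shmem pwrite loop above, written as a standalone helper. The
// CLFLUSH_BEFORE flag mirrors the bit reported by the new prepare_shmem_write
// helper; clflush_size stands in for boot_cpu_data.x86_clflush_size.
#include <stdbool.h>

#define CLFLUSH_BEFORE (1u << 0)

static bool partial_cacheline_write(unsigned int needs_clflush,
                                    unsigned int offset, unsigned int length,
                                    unsigned int clflush_size)
{
        // Flushing before the write only matters if the object needs it at all.
        if (!(needs_clflush & CLFLUSH_BEFORE))
                return false;

        // A write that starts or ends inside a cacheline leaves stale bytes in
        // that line, so it must be clflushed before being partially overwritten.
        return ((offset | length) & (clflush_size - 1)) != 0;
}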
*/ - if (!interruptible) - return -EIO; - - return -EAGAIN; - } - - return 0; -} - -static unsigned long local_clock_us(unsigned *cpu) -{ - unsigned long t; - - /* Cheaply and approximately convert from nanoseconds to microseconds. - * The result and subsequent calculations are also defined in the same - * approximate microseconds units. The principal source of timing - * error here is from the simple truncation. - * - * Note that local_clock() is only defined wrt to the current CPU; - * the comparisons are no longer valid if we switch CPUs. Instead of - * blocking preemption for the entire busywait, we can detect the CPU - * switch and use that as indicator of system load and a reason to - * stop busywaiting, see busywait_stop(). - */ - *cpu = get_cpu(); - t = local_clock() >> 10; - put_cpu(); - - return t; -} - -static bool busywait_stop(unsigned long timeout, unsigned cpu) -{ - unsigned this_cpu; - - if (time_after(local_clock_us(&this_cpu), timeout)) - return true; - - return this_cpu != cpu; -} - -bool __i915_spin_request(const struct drm_i915_gem_request *req, - int state, unsigned long timeout_us) -{ - unsigned cpu; - - /* When waiting for high frequency requests, e.g. during synchronous - * rendering split between the CPU and GPU, the finite amount of time - * required to set up the irq and wait upon it limits the response - * rate. By busywaiting on the request completion for a short while we - * can service the high frequency waits as quick as possible. However, - * if it is a slow request, we want to sleep as quickly as possible. - * The tradeoff between waiting and sleeping is roughly the time it - * takes to sleep on a request, on the order of a microsecond. - */ - - timeout_us += local_clock_us(&cpu); - do { - if (i915_gem_request_completed(req)) - return true; - - if (signal_pending_state(state, current)) - break; - - if (busywait_stop(timeout_us, cpu)) - break; - - cpu_relax_lowlatency(); - } while (!need_resched()); - - return false; -} - -/** - * __i915_wait_request - wait until execution of request has finished - * @req: duh! - * @interruptible: do an interruptible wait (normally yes) - * @timeout: in - how long to wait (NULL forever); out - how much time remaining - * @rps: RPS client - * - * Note: It is of utmost importance that the passed in seqno and reset_counter - * values have been read by the caller in an smp safe manner. Where read-side - * locks are involved, it is sufficient to read the reset_counter before - * unlocking the lock that protects the seqno. For lockless tricks, the - * reset_counter _must_ be read before, and an appropriate smp_rmb must be - * inserted. - * - * Returns 0 if the request was found within the alloted time. Else returns the - * errno with remaining time filled in timeout argument. - */ -int __i915_wait_request(struct drm_i915_gem_request *req, - bool interruptible, - s64 *timeout, - struct intel_rps_client *rps) -{ - int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; - DEFINE_WAIT(reset); - struct intel_wait wait; - unsigned long timeout_remain; - s64 before = 0; /* Only to silence a compiler warning. */ - int ret = 0; - - might_sleep(); - - if (list_empty(&req->list)) - return 0; - - if (i915_gem_request_completed(req)) - return 0; - - timeout_remain = MAX_SCHEDULE_TIMEOUT; - if (timeout) { - if (WARN_ON(*timeout < 0)) - return -EINVAL; - - if (*timeout == 0) - return -ETIME; - - timeout_remain = nsecs_to_jiffies_timeout(*timeout); - - /* - * Record current time in case interrupted by signal, or wedged. 
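// Sketch of the bounded-busywait idea behind __i915_spin_request(), shown as
// plain POSIX userspace code: poll a completion flag for a few microseconds
// before falling back to a sleeping wait, and stop early if we migrate off
// the starting CPU (a hint that the system is loaded). clock_gettime() and
// sched_getcpu() stand in for local_clock(); the helper names are invented.
#define _GNU_SOURCE
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t clock_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
}

// Returns true if the flag became set while spinning, false to go to sleep.
static bool spin_for_completion(const atomic_bool *done, unsigned int timeout_us)
{
        uint64_t deadline = clock_us() + timeout_us;
        int cpu = sched_getcpu();

        for (;;) {
                if (atomic_load_explicit(done, memory_order_acquire))
                        return true;
                // Too slow, or migrated to another CPU: stop burning cycles.
                if (clock_us() > deadline || sched_getcpu() != cpu)
                        return false;
        }
}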
- */ - before = ktime_get_raw_ns(); - } - - trace_i915_gem_request_wait_begin(req); - - /* This client is about to stall waiting for the GPU. In many cases - * this is undesirable and limits the throughput of the system, as - * many clients cannot continue processing user input/output whilst - * blocked. RPS autotuning may take tens of milliseconds to respond - * to the GPU load and thus incurs additional latency for the client. - * We can circumvent that by promoting the GPU frequency to maximum - * before we wait. This makes the GPU throttle up much more quickly - * (good for benchmarks and user experience, e.g. window animations), - * but at a cost of spending more power processing the workload - * (bad for battery). Not all clients even want their results - * immediately and for them we should just let the GPU select its own - * frequency to maximise efficiency. To prevent a single client from - * forcing the clocks too high for the whole system, we only allow - * each client to waitboost once in a busy period. - */ - if (INTEL_INFO(req->i915)->gen >= 6) - gen6_rps_boost(req->i915, rps, req->emitted_jiffies); - - /* Optimistic spin for the next ~jiffie before touching IRQs */ - if (i915_spin_request(req, state, 5)) - goto complete; - - set_current_state(state); - add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); - - intel_wait_init(&wait, req->seqno); - if (intel_engine_add_wait(req->engine, &wait)) - /* In order to check that we haven't missed the interrupt - * as we enabled it, we need to kick ourselves to do a - * coherent check on the seqno before we sleep. - */ - goto wakeup; - - for (;;) { - if (signal_pending_state(state, current)) { - ret = -ERESTARTSYS; - break; - } - - timeout_remain = io_schedule_timeout(timeout_remain); - if (timeout_remain == 0) { - ret = -ETIME; - break; - } - - if (intel_wait_complete(&wait)) - break; - - set_current_state(state); - -wakeup: - /* Carefully check if the request is complete, giving time - * for the seqno to be visible following the interrupt. - * We also have to check in case we are kicked by the GPU - * reset in order to drop the struct_mutex. - */ - if (__i915_request_irq_complete(req)) - break; - - /* Only spin if we know the GPU is processing this request */ - if (i915_spin_request(req, state, 2)) - break; - } - remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset); - - intel_engine_remove_wait(req->engine, &wait); - __set_current_state(TASK_RUNNING); -complete: - trace_i915_gem_request_wait_end(req); - - if (timeout) { - s64 tres = *timeout - (ktime_get_raw_ns() - before); - - *timeout = tres < 0 ? 0 : tres; - - /* - * Apparently ktime isn't accurate enough and occasionally has a - * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch - * things up to make the test happy. We allow up to 1 jiffy. - * - * This is a regrssion from the timespec->ktime conversion. - */ - if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000) - *timeout = 0; - } - - if (rps && req->seqno == req->engine->last_submitted_seqno) { - /* The GPU is now idle and this client has stalled. - * Since no other client has submitted a request in the - * meantime, assume that this client is the only one - * supplying work to the GPU but is unable to keep that - * work supplied because it is waiting. Since the GPU is - * then never kept fully busy, RPS autoclocking will - * keep the clocks relatively low, causing further delays. - * Compensate by giving the synchronous client credit for - * a waitboost next time. 
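// Sketch of the remaining-timeout bookkeeping around the wait above: record a
// start timestamp and, on return, report how much of the caller's budget is
// left, clamped to zero. CLOCK_MONOTONIC stands in for ktime_get_raw_ns();
// the function names are invented for this illustration.
#include <stdint.h>
#include <time.h>

static int64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000000ll + ts.tv_nsec;
}

// Update *timeout_ns in place after a wait that started at before_ns.
static void account_wait_time(int64_t *timeout_ns, int64_t before_ns)
{
        int64_t remaining = *timeout_ns - (now_ns() - before_ns);

        *timeout_ns = remaining > 0 ? remaining : 0;
}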
- */ - spin_lock(&req->i915->rps.client_lock); - list_del_init(&rps->link); - spin_unlock(&req->i915->rps.client_lock); - } - - return ret; -} - -int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv; - - WARN_ON(!req || !file || req->file_priv); - - if (!req || !file) - return -EINVAL; - - if (req->file_priv) - return -EINVAL; - - file_priv = file->driver_priv; - - spin_lock(&file_priv->mm.lock); - req->file_priv = file_priv; - list_add_tail(&req->client_list, &file_priv->mm.request_list); - spin_unlock(&file_priv->mm.lock); - - req->pid = get_pid(task_pid(current)); - - return 0; -} - -static inline void -i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) -{ - struct drm_i915_file_private *file_priv = request->file_priv; - - if (!file_priv) - return; - - spin_lock(&file_priv->mm.lock); - list_del(&request->client_list); - request->file_priv = NULL; - spin_unlock(&file_priv->mm.lock); - - put_pid(request->pid); - request->pid = NULL; -} - -static void i915_gem_request_retire(struct drm_i915_gem_request *request) -{ - trace_i915_gem_request_retire(request); - - /* We know the GPU must have read the request to have - * sent us the seqno + interrupt, so use the position - * of tail of the request to update the last known position - * of the GPU head. - * - * Note this requires that we are always called in request - * completion order. - */ - request->ringbuf->last_retired_head = request->postfix; - - list_del_init(&request->list); - i915_gem_request_remove_from_client(request); - - if (request->previous_context) { - if (i915.enable_execlists) - intel_lr_context_unpin(request->previous_context, - request->engine); - } - - i915_gem_context_unreference(request->ctx); - i915_gem_request_unreference(request); -} - -static void -__i915_gem_request_retire__upto(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_i915_gem_request *tmp; - - lockdep_assert_held(&engine->i915->drm.struct_mutex); - - if (list_empty(&req->list)) - return; - - do { - tmp = list_first_entry(&engine->request_list, - typeof(*tmp), list); - - i915_gem_request_retire(tmp); - } while (tmp != req); - - WARN_ON(i915_verify_lists(engine->dev)); -} - -/** - * Waits for a request to be signaled, and cleans up the - * request and object lists appropriately for that event. - * @req: request to wait on - */ -int -i915_wait_request(struct drm_i915_gem_request *req) -{ - struct drm_i915_private *dev_priv = req->i915; - bool interruptible; - int ret; - - interruptible = dev_priv->mm.interruptible; - - BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex)); - - ret = __i915_wait_request(req, interruptible, NULL, NULL); - if (ret) - return ret; - - /* If the GPU hung, we want to keep the requests to find the guilty. */ - if (!i915_reset_in_progress(&dev_priv->gpu_error)) - __i915_gem_request_retire__upto(req); - - return 0; -} - -/** - * Ensures that all rendering to the object has completed and the object is - * safe to unbind from the GTT or access from the CPU. 
- * @obj: i915 gem object - * @readonly: waiting for read access or write - */ -int -i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, - bool readonly) -{ - int ret, i; - - if (!obj->active) - return 0; - - if (readonly) { - if (obj->last_write_req != NULL) { - ret = i915_wait_request(obj->last_write_req); - if (ret) - return ret; - - i = obj->last_write_req->engine->id; - if (obj->last_read_req[i] == obj->last_write_req) - i915_gem_object_retire__read(obj, i); - else - i915_gem_object_retire__write(obj); - } - } else { - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (obj->last_read_req[i] == NULL) - continue; - - ret = i915_wait_request(obj->last_read_req[i]); - if (ret) - return ret; - - i915_gem_object_retire__read(obj, i); - } - GEM_BUG_ON(obj->active); - } - - return 0; -} - -static void -i915_gem_object_retire_request(struct drm_i915_gem_object *obj, - struct drm_i915_gem_request *req) -{ - int ring = req->engine->id; - - if (obj->last_read_req[ring] == req) - i915_gem_object_retire__read(obj, ring); - else if (obj->last_write_req == req) - i915_gem_object_retire__write(obj); - - if (!i915_reset_in_progress(&req->i915->gpu_error)) - __i915_gem_request_retire__upto(req); -} - -/* A nonblocking variant of the above wait. This is a highly dangerous routine - * as the object state may change during this call. - */ -static __must_check int -i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, - struct intel_rps_client *rps, - bool readonly) -{ - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; - int ret, i, n = 0; - - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - BUG_ON(!dev_priv->mm.interruptible); - - if (!obj->active) - return 0; - - if (readonly) { - struct drm_i915_gem_request *req; - - req = obj->last_write_req; - if (req == NULL) - return 0; - - requests[n++] = i915_gem_request_reference(req); - } else { - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct drm_i915_gem_request *req; - - req = obj->last_read_req[i]; - if (req == NULL) - continue; - - requests[n++] = i915_gem_request_reference(req); - } - } - - mutex_unlock(&dev->struct_mutex); - ret = 0; - for (i = 0; ret == 0 && i < n; i++) - ret = __i915_wait_request(requests[i], true, NULL, rps); - mutex_lock(&dev->struct_mutex); - - for (i = 0; i < n; i++) { - if (ret == 0) - i915_gem_object_retire_request(obj, requests[i]); - i915_gem_request_unreference(requests[i]); - } +err_rpm: + intel_runtime_pm_put(dev_priv); +err: + i915_gem_object_put_unlocked(obj); return ret; } -static struct intel_rps_client *to_rps_client(struct drm_file *file) -{ - struct drm_i915_file_private *fpriv = file->driver_priv; - return &fpriv->rps; -} - -static enum fb_op_origin +static inline enum fb_op_origin write_origin(struct drm_i915_gem_object *obj, unsigned domain) { - return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ? - ORIGIN_GTT : ORIGIN_CPU; + return (domain == I915_GEM_DOMAIN_GTT ? + obj->frontbuffer_ggtt_origin : ORIGIN_CPU); } /** @@ -1828,10 +1519,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, int ret; /* Only handle setting domains to types used by the CPU. 
*/ - if (write_domain & I915_GEM_GPU_DOMAINS) - return -EINVAL; - - if (read_domains & I915_GEM_GPU_DOMAINS) + if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) return -EINVAL; /* Having something in the write domain implies it's in the read @@ -1840,25 +1528,21 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (write_domain != 0 && read_domains != write_domain) return -EINVAL; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; /* Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. */ - ret = i915_gem_object_wait_rendering__nonblocking(obj, - to_rps_client(file), - !write_domain); + ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain); if (ret) - goto unref; + goto err; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err; if (read_domains & I915_GEM_DOMAIN_GTT) ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); @@ -1868,11 +1552,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (write_domain != 0) intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); -unref: - drm_gem_object_unreference(&obj->base); -unlock: + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); return ret; + +err: + i915_gem_object_put_unlocked(obj); + return ret; } /** @@ -1887,26 +1573,23 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_sw_finish *args = data; struct drm_i915_gem_object *obj; - int ret = 0; + int err = 0; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; /* Pinned buffers may be scanout, so flush the cache */ - if (obj->pin_display) - i915_gem_object_flush_cpu_write_domain(obj); + if (READ_ONCE(obj->pin_display)) { + err = i915_mutex_lock_interruptible(dev); + if (!err) { + i915_gem_object_flush_cpu_write_domain(obj); + mutex_unlock(&dev->struct_mutex); + } + } - drm_gem_object_unreference(&obj->base); -unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + i915_gem_object_put_unlocked(obj); + return err; } /** @@ -1934,7 +1617,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_mmap *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; unsigned long addr; if (args->flags & ~(I915_MMAP_WC)) @@ -1943,19 +1626,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) return -ENODEV; - obj = drm_gem_object_lookup(file, args->handle); - if (obj == NULL) + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) return -ENOENT; /* prime objects have no backing filp to GEM mmap * pages from. 
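// Sketch of the flag validation folded into a single test above: user-supplied
// read/write domains may not name GPU-only domains, and a non-zero write
// domain must match the read domain. The GPU_DOMAINS mask value used here is
// an assumption of the illustration, not taken from the uapi headers.
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_GPU_DOMAINS 0x0000003eu

static bool set_domain_args_valid(uint32_t read_domains, uint32_t write_domain)
{
        // Only handle setting domains to types used by the CPU.
        if ((write_domain | read_domains) & EXAMPLE_GPU_DOMAINS)
                return false;

        // Having something in the write domain implies it's in the read domain.
        return write_domain == 0 || read_domains == write_domain;
}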
*/ - if (!obj->filp) { - drm_gem_object_unreference_unlocked(obj); + if (!obj->base.filp) { + i915_gem_object_put_unlocked(obj); return -EINVAL; } - addr = vm_mmap(obj->filp, 0, args->size, + addr = vm_mmap(obj->base.filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); if (args->flags & I915_MMAP_WC) { @@ -1963,7 +1646,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - drm_gem_object_unreference_unlocked(obj); + i915_gem_object_put_unlocked(obj); return -EINTR; } vma = find_vma(mm, addr); @@ -1975,9 +1658,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, up_write(&mm->mmap_sem); /* This may race, but that's ok, it only gets set */ - WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true); + WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); } - drm_gem_object_unreference_unlocked(obj); + i915_gem_object_put_unlocked(obj); if (IS_ERR((void *)addr)) return addr; @@ -1986,9 +1669,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, return 0; } +static unsigned int tile_row_pages(struct drm_i915_gem_object *obj) +{ + u64 size; + + size = i915_gem_object_get_stride(obj); + size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8; + + return size >> PAGE_SHIFT; +} + /** * i915_gem_fault - fault a page into the GTT - * @vma: VMA in question + * @area: CPU VMA in question * @vmf: fault info * * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped @@ -2002,121 +1695,116 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * suffer if the GTT working set is large or there are few fence registers * left. */ -int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) { - struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); +#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ + struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_ggtt_view view = i915_ggtt_view_normal; - pgoff_t page_offset; - unsigned long pfn; - int ret = 0; bool write = !!(vmf->flags & FAULT_FLAG_WRITE); - - intel_runtime_pm_get(dev_priv); + struct i915_vma *vma; + pgoff_t page_offset; + unsigned int flags; + int ret; /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >> PAGE_SHIFT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto out; - trace_i915_gem_object_fault(obj, page_offset, true, write); /* Try to flush the object off the GPU first without holding the lock. - * Upon reacquiring the lock, we will perform our sanity checks and then + * Upon acquiring the lock, we will perform our sanity checks and then * repeat the flush holding the lock in the normal manner to catch cases * where we are gazumped. */ - ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write); + ret = __unsafe_wait_rendering(obj, NULL, !write); if (ret) - goto unlock; + goto err; + + intel_runtime_pm_get(dev_priv); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err_rpm; /* Access to snoopable pages through the GTT is incoherent. 
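// Sketch of the tile_row_pages() calculation introduced above: one tile row
// spans the object's stride times the tile height (8 lines for X tiling, 32
// for Y), expressed in pages. Standalone version; the enum and the 4 KiB page
// size are assumptions of this illustration.
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12

enum example_tiling { EXAMPLE_TILING_NONE, EXAMPLE_TILING_X, EXAMPLE_TILING_Y };

static unsigned int example_tile_row_pages(uint64_t stride_bytes,
                                           enum example_tiling tiling)
{
        uint64_t row_bytes = stride_bytes * (tiling == EXAMPLE_TILING_Y ? 32 : 8);

        return row_bytes >> EXAMPLE_PAGE_SHIFT;
}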
*/ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { ret = -EFAULT; - goto unlock; + goto err_unlock; } - /* Use a partial view if the object is bigger than the aperture. */ - if (obj->base.size >= ggtt->mappable_end && - obj->tiling_mode == I915_TILING_NONE) { - static const unsigned int chunk_size = 256; // 1 MiB + /* If the object is smaller than a couple of partial vma, it is + * not worth only creating a single partial vma - we may as well + * clear enough space for the full object. + */ + flags = PIN_MAPPABLE; + if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) + flags |= PIN_NONBLOCK | PIN_NONFAULT; + + /* Now pin it into the GTT as needed */ + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); + if (IS_ERR(vma)) { + struct i915_ggtt_view view; + unsigned int chunk_size; + + /* Use a partial view if it is bigger than available space */ + chunk_size = MIN_CHUNK_PAGES; + if (i915_gem_object_is_tiled(obj)) + chunk_size = max(chunk_size, tile_row_pages(obj)); memset(&view, 0, sizeof(view)); view.type = I915_GGTT_VIEW_PARTIAL; view.params.partial.offset = rounddown(page_offset, chunk_size); view.params.partial.size = - min_t(unsigned int, - chunk_size, - (vma->vm_end - vma->vm_start)/PAGE_SIZE - + min_t(unsigned int, chunk_size, + (area->vm_end - area->vm_start) / PAGE_SIZE - view.params.partial.offset); - } - /* Now pin it into the GTT if needed */ - ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE); - if (ret) - goto unlock; + /* If the partial covers the entire object, just create a + * normal VMA. + */ + if (chunk_size >= obj->base.size >> PAGE_SHIFT) + view.type = I915_GGTT_VIEW_NORMAL; + + /* Userspace is now writing through an untracked VMA, abandon + * all hope that the hardware is able to track future writes. + */ + obj->frontbuffer_ggtt_origin = ORIGIN_CPU; + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + } + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unlock; + } ret = i915_gem_object_set_to_gtt_domain(obj, write); if (ret) - goto unpin; + goto err_unpin; - ret = i915_gem_object_get_fence(obj); + ret = i915_vma_get_fence(vma); if (ret) - goto unpin; + goto err_unpin; /* Finally, remap it using the new GTT offset */ - pfn = ggtt->mappable_base + - i915_gem_obj_ggtt_offset_view(obj, &view); - pfn >>= PAGE_SHIFT; - - if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) { - /* Overriding existing pages in partial view does not cause - * us any trouble as TLBs are still valid because the fault - * is due to userspace losing part of the mapping or never - * having accessed it before (at this partials' range). 
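// Sketch of the partial-view placement used by the fault handler above: map a
// chunk-aligned window around the faulting page, clamped so it does not run
// past the end of the mapping. Standalone helper with invented names; the
// driver stores the result in an i915_ggtt_view instead.
struct example_window {
        unsigned long first_page;       // start of the window, in pages
        unsigned long nr_pages;         // length of the window, in pages
};

static struct example_window pick_partial_window(unsigned long fault_page,
                                                 unsigned long total_pages,
                                                 unsigned long chunk_pages)
{
        struct example_window w;

        // Round the faulting page down to a chunk boundary...
        w.first_page = fault_page - (fault_page % chunk_pages);

        // ...and map at most one chunk without running past the object.
        w.nr_pages = total_pages - w.first_page;
        if (w.nr_pages > chunk_pages)
                w.nr_pages = chunk_pages;

        return w;
}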
- */ - unsigned long base = vma->vm_start + - (view.params.partial.offset << PAGE_SHIFT); - unsigned int i; - - for (i = 0; i < view.params.partial.size; i++) { - ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i); - if (ret) - break; - } - - obj->fault_mappable = true; - } else { - if (!obj->fault_mappable) { - unsigned long size = min_t(unsigned long, - vma->vm_end - vma->vm_start, - obj->base.size); - int i; - - for (i = 0; i < size >> PAGE_SHIFT; i++) { - ret = vm_insert_pfn(vma, - (unsigned long)vma->vm_start + i * PAGE_SIZE, - pfn + i); - if (ret) - break; - } + ret = remap_io_mapping(area, + area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT), + (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT, + min_t(u64, vma->size, area->vm_end - area->vm_start), + &ggtt->mappable); + if (ret) + goto err_unpin; - obj->fault_mappable = true; - } else - ret = vm_insert_pfn(vma, - (unsigned long)vmf->virtual_address, - pfn + page_offset); - } -unpin: - i915_gem_object_ggtt_unpin_view(obj, &view); -unlock: + obj->fault_mappable = true; +err_unpin: + __i915_vma_unpin(vma); +err_unlock: mutex_unlock(&dev->struct_mutex); -out: +err_rpm: + intel_runtime_pm_put(dev_priv); +err: switch (ret) { case -EIO: /* @@ -2157,8 +1845,6 @@ out: ret = VM_FAULT_SIGBUS; break; } - - intel_runtime_pm_put(dev_priv); return ret; } @@ -2212,46 +1898,58 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv) i915_gem_release_mmap(obj); } -uint32_t -i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) +/** + * i915_gem_get_ggtt_size - return required global GTT size for an object + * @dev_priv: i915 device + * @size: object size + * @tiling_mode: tiling mode + * + * Return the required global GTT size for an object, taking into account + * potential fence register mapping. + */ +u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, + u64 size, int tiling_mode) { - uint32_t gtt_size; + u64 ggtt_size; + + GEM_BUG_ON(size == 0); - if (INTEL_INFO(dev)->gen >= 4 || + if (INTEL_GEN(dev_priv) >= 4 || tiling_mode == I915_TILING_NONE) return size; /* Previous chips need a power-of-two fence region when tiling */ - if (IS_GEN3(dev)) - gtt_size = 1024*1024; + if (IS_GEN3(dev_priv)) + ggtt_size = 1024*1024; else - gtt_size = 512*1024; + ggtt_size = 512*1024; - while (gtt_size < size) - gtt_size <<= 1; + while (ggtt_size < size) + ggtt_size <<= 1; - return gtt_size; + return ggtt_size; } /** - * i915_gem_get_gtt_alignment - return required GTT alignment for an object - * @dev: drm device + * i915_gem_get_ggtt_alignment - return required global GTT alignment + * @dev_priv: i915 device * @size: object size * @tiling_mode: tiling mode - * @fenced: is fenced alignemned required or not + * @fenced: is fenced alignment required or not * - * Return the required GTT alignment for an object, taking into account + * Return the required global GTT alignment for an object, taking into account * potential fence register mapping. */ -uint32_t -i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, - int tiling_mode, bool fenced) +u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, + int tiling_mode, bool fenced) { + GEM_BUG_ON(size == 0); + /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. 
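// Sketch of the sizing rule in i915_gem_get_ggtt_size() above: pre-gen4
// hardware needs tiled objects placed in a power-of-two fence region, starting
// from a per-generation minimum and doubling until the object fits.
// Standalone version; gen and the tiled flag are plain parameters here.
#include <stdbool.h>
#include <stdint.h>

static uint64_t fence_region_size(uint64_t obj_size, int gen, bool tiled)
{
        uint64_t fence;

        if (gen >= 4 || !tiled)
                return obj_size;        // no power-of-two requirement

        // Gen3 fences start at 1 MiB, earlier parts at 512 KiB.
        fence = (gen == 3) ? 1024 * 1024 : 512 * 1024;
        while (fence < obj_size)
                fence <<= 1;

        return fence;
}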
*/ - if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) || + if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) || tiling_mode == I915_TILING_NONE) return 4096; @@ -2259,42 +1957,34 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - return i915_gem_get_gtt_size(dev, size, tiling_mode); + return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode); } static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - int ret; - - dev_priv->mm.shrinker_no_lock_stealing = true; + int err; - ret = drm_gem_create_mmap_offset(&obj->base); - if (ret != -ENOSPC) - goto out; + err = drm_gem_create_mmap_offset(&obj->base); + if (!err) + return 0; - /* Badly fragmented mmap space? The only way we can recover - * space is by destroying unwanted objects. We can't randomly release - * mmap_offsets as userspace expects them to be persistent for the - * lifetime of the objects. The closest we can is to release the - * offsets on purgeable objects by truncating it and marking it purged, - * which prevents userspace from ever using that object again. + /* We can idle the GPU locklessly to flush stale objects, but in order + * to claim that space for ourselves, we need to take the big + * struct_mutex to free the requests+objects and allocate our slot. */ - i915_gem_shrink(dev_priv, - obj->base.size >> PAGE_SHIFT, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); - ret = drm_gem_create_mmap_offset(&obj->base); - if (ret != -ENOSPC) - goto out; + err = i915_gem_wait_for_idle(dev_priv, true); + if (err) + return err; - i915_gem_shrink_all(dev_priv); - ret = drm_gem_create_mmap_offset(&obj->base); -out: - dev_priv->mm.shrinker_no_lock_stealing = false; + err = i915_mutex_lock_interruptible(&dev_priv->drm); + if (!err) { + i915_gem_retire_requests(dev_priv); + err = drm_gem_create_mmap_offset(&obj->base); + mutex_unlock(&dev_priv->drm.struct_mutex); + } - return ret; + return err; } static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) @@ -2311,32 +2001,15 @@ i915_gem_mmap_gtt(struct drm_file *file, struct drm_i915_gem_object *obj; int ret; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(file, handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } - - if (obj->madv != I915_MADV_WILLNEED) { - DRM_DEBUG("Attempting to mmap a purgeable buffer\n"); - ret = -EFAULT; - goto out; - } + obj = i915_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; ret = i915_gem_object_create_mmap_offset(obj); - if (ret) - goto out; - - *offset = drm_vma_node_offset_addr(&obj->base.vma_node); + if (ret == 0) + *offset = drm_vma_node_offset_addr(&obj->base.vma_node); -out: - drm_gem_object_unreference(&obj->base); -unlock: - mutex_unlock(&dev->struct_mutex); + i915_gem_object_put_unlocked(obj); return ret; } @@ -2454,7 +2127,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages_pin_count) return -EBUSY; - BUG_ON(i915_gem_obj_bound_any(obj)); + GEM_BUG_ON(obj->bind_count); /* ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt @@ -2462,10 +2135,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) list_del(&obj->global_list); if (obj->mapping) { - if 
(is_vmalloc_addr(obj->mapping)) - vunmap(obj->mapping); + void *ptr; + + ptr = ptr_mask_bits(obj->mapping); + if (is_vmalloc_addr(ptr)) + vunmap(ptr); else - kunmap(kmap_to_page(obj->mapping)); + kunmap(kmap_to_page(ptr)); + obj->mapping = NULL; } @@ -2574,7 +2251,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj); - if (obj->tiling_mode != I915_TILING_NONE && + if (i915_gem_object_is_tiled(obj) && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) i915_gem_object_pin_pages(obj); @@ -2638,7 +2315,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) } /* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, + enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT; struct sg_table *sgt = obj->pages; @@ -2647,10 +2325,11 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) struct page *stack_pages[32]; struct page **pages = stack_pages; unsigned long i = 0; + pgprot_t pgprot; void *addr; /* A single page can always be kmapped */ - if (n_pages == 1) + if (n_pages == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); if (n_pages > ARRAY_SIZE(stack_pages)) { @@ -2666,7 +2345,15 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) /* Check that we have the expected number of pages */ GEM_BUG_ON(i != n_pages); - addr = vmap(pages, n_pages, 0, PAGE_KERNEL); + switch (type) { + case I915_MAP_WB: + pgprot = PAGE_KERNEL; + break; + case I915_MAP_WC: + pgprot = pgprot_writecombine(PAGE_KERNEL_IO); + break; + } + addr = vmap(pages, n_pages, 0, pgprot); if (pages != stack_pages) drm_free_large(pages); @@ -2675,276 +2362,89 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) } /* get, pin, and map the pages of the object into kernel space */ -void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type) { + enum i915_map_type has_type; + bool pinned; + void *ptr; int ret; lockdep_assert_held(&obj->base.dev->struct_mutex); + GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); ret = i915_gem_object_get_pages(obj); if (ret) return ERR_PTR(ret); i915_gem_object_pin_pages(obj); + pinned = obj->pages_pin_count > 1; - if (!obj->mapping) { - obj->mapping = i915_gem_object_map(obj); - if (!obj->mapping) { - i915_gem_object_unpin_pages(obj); - return ERR_PTR(-ENOMEM); + ptr = ptr_unpack_bits(obj->mapping, has_type); + if (ptr && has_type != type) { + if (pinned) { + ret = -EBUSY; + goto err; } - } - return obj->mapping; -} + if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); -void i915_vma_move_to_active(struct i915_vma *vma, - struct drm_i915_gem_request *req) -{ - struct drm_i915_gem_object *obj = vma->obj; - struct intel_engine_cs *engine; + ptr = obj->mapping = NULL; + } - engine = i915_gem_request_get_engine(req); + if (!ptr) { + ptr = i915_gem_object_map(obj, type); + if (!ptr) { + ret = -ENOMEM; + goto err; + } - /* Add a reference if we're newly entering the active list. 
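// Sketch of the pointer tagging used by pin_map above (ptr_pack_bits and
// friends): kernel mappings are at least word aligned, so the low bits of
// obj->mapping are free to record how the mapping was created (write-back vs
// write-combined). Standalone model with invented names.
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_TAG_MASK 0x3ul  // two low bits free on >= 4-byte alignment

static void *pack_ptr(void *ptr, unsigned int tag)
{
        assert(((uintptr_t)ptr & EXAMPLE_TAG_MASK) == 0 && tag <= EXAMPLE_TAG_MASK);
        return (void *)((uintptr_t)ptr | tag);
}

static void *unpack_ptr(void *packed, unsigned int *tag)
{
        *tag = (uintptr_t)packed & EXAMPLE_TAG_MASK;
        return (void *)((uintptr_t)packed & ~EXAMPLE_TAG_MASK);
}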
*/ - if (obj->active == 0) - drm_gem_object_reference(&obj->base); - obj->active |= intel_engine_flag(engine); + obj->mapping = ptr_pack_bits(ptr, type); + } - list_move_tail(&obj->engine_list[engine->id], &engine->active_list); - i915_gem_request_assign(&obj->last_read_req[engine->id], req); + return ptr; - list_move_tail(&vma->vm_link, &vma->vm->active_list); +err: + i915_gem_object_unpin_pages(obj); + return ERR_PTR(ret); } static void -i915_gem_object_retire__write(struct drm_i915_gem_object *obj) +i915_gem_object_retire__write(struct i915_gem_active *active, + struct drm_i915_gem_request *request) { - GEM_BUG_ON(obj->last_write_req == NULL); - GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine))); + struct drm_i915_gem_object *obj = + container_of(active, struct drm_i915_gem_object, last_write); - i915_gem_request_assign(&obj->last_write_req, NULL); intel_fb_obj_flush(obj, true, ORIGIN_CS); } static void -i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) +i915_gem_object_retire__read(struct i915_gem_active *active, + struct drm_i915_gem_request *request) { - struct i915_vma *vma; + int idx = request->engine->id; + struct drm_i915_gem_object *obj = + container_of(active, struct drm_i915_gem_object, last_read[idx]); - GEM_BUG_ON(obj->last_read_req[ring] == NULL); - GEM_BUG_ON(!(obj->active & (1 << ring))); + GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx)); - list_del_init(&obj->engine_list[ring]); - i915_gem_request_assign(&obj->last_read_req[ring], NULL); - - if (obj->last_write_req && obj->last_write_req->engine->id == ring) - i915_gem_object_retire__write(obj); - - obj->active &= ~(1 << ring); - if (obj->active) + i915_gem_object_clear_active(obj, idx); + if (i915_gem_object_is_active(obj)) return; /* Bump our place on the bound list to keep it roughly in LRU order * so that we don't steal from recently used but inactive objects * (unless we are forced to ofc!) 
*/ - list_move_tail(&obj->global_list, - &to_i915(obj->base.dev)->mm.bound_list); - - list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (!list_empty(&vma->vm_link)) - list_move_tail(&vma->vm_link, &vma->vm->inactive_list); - } - - i915_gem_request_assign(&obj->last_fenced_req, NULL); - drm_gem_object_unreference(&obj->base); -} - -static int -i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) -{ - struct intel_engine_cs *engine; - int ret; - - /* Carefully retire all requests without writing to the rings */ - for_each_engine(engine, dev_priv) { - ret = intel_engine_idle(engine); - if (ret) - return ret; - } - i915_gem_retire_requests(dev_priv); - - /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ - if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) { - while (intel_kick_waiters(dev_priv) || - intel_kick_signalers(dev_priv)) - yield(); - } + if (obj->bind_count) + list_move_tail(&obj->global_list, + &request->i915->mm.bound_list); - /* Finally reset hw state */ - for_each_engine(engine, dev_priv) - intel_ring_init_seqno(engine, seqno); - - return 0; -} - -int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - if (seqno == 0) - return -EINVAL; - - /* HWS page needs to be set less than what we - * will inject to ring - */ - ret = i915_gem_init_seqno(dev_priv, seqno - 1); - if (ret) - return ret; - - /* Carefully set the last_seqno value so that wrap - * detection still works - */ - dev_priv->next_seqno = seqno; - dev_priv->last_seqno = seqno - 1; - if (dev_priv->last_seqno == 0) - dev_priv->last_seqno--; - - return 0; -} - -int -i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) -{ - /* reserve 0 for non-seqno */ - if (dev_priv->next_seqno == 0) { - int ret = i915_gem_init_seqno(dev_priv, 0); - if (ret) - return ret; - - dev_priv->next_seqno = 1; - } - - *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; - return 0; -} - -static void i915_gem_mark_busy(const struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - dev_priv->gt.active_engines |= intel_engine_flag(engine); - if (dev_priv->gt.awake) - return; - - intel_runtime_pm_get_noresume(dev_priv); - dev_priv->gt.awake = true; - - i915_update_gfx_val(dev_priv); - if (INTEL_GEN(dev_priv) >= 6) - gen6_rps_busy(dev_priv); - - queue_delayed_work(dev_priv->wq, - &dev_priv->gt.retire_work, - round_jiffies_up_relative(HZ)); -} - -/* - * NB: This function is not allowed to fail. Doing so would mean the the - * request is not being tracked for completion but the work itself is - * going to happen on the hardware. This would be a Bad Thing(tm). - */ -void __i915_add_request(struct drm_i915_gem_request *request, - struct drm_i915_gem_object *obj, - bool flush_caches) -{ - struct intel_engine_cs *engine; - struct intel_ringbuffer *ringbuf; - u32 request_start; - u32 reserved_tail; - int ret; - - if (WARN_ON(request == NULL)) - return; - - engine = request->engine; - ringbuf = request->ringbuf; - - /* - * To ensure that this call will not fail, space for its emissions - * should already have been reserved in the ring buffer. Let the ring - * know that it is time to use that space up. - */ - request_start = intel_ring_get_tail(ringbuf); - reserved_tail = request->reserved_space; - request->reserved_space = 0; - - /* - * Emit any outstanding flushes - execbuf can fail to emit the flush - * after having emitted the batchbuffer command. 
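// Sketch of the seqno comparison that the wrap handling above relies on
// (i915_seqno_passed in the driver): with 32-bit sequence numbers that may
// wrap, "a is at or after b" is tested with a signed subtraction, which stays
// correct across the wraparound as long as the two values are within 2^31.
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}
// e.g. seqno_passed(2, 0xfffffffe) is true: 2 was issued after the wrap.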
Hence we need to fix - * things up similar to emitting the lazy request. The difference here - * is that the flush _must_ happen before the next request, no matter - * what. - */ - if (flush_caches) { - if (i915.enable_execlists) - ret = logical_ring_flush_all_caches(request); - else - ret = intel_ring_flush_all_caches(request); - /* Not allowed to fail! */ - WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret); - } - - trace_i915_gem_request_add(request); - - request->head = request_start; - - /* Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here. - */ - request->batch_obj = obj; - - /* Seal the request and mark it as pending execution. Note that - * we may inspect this state, without holding any locks, during - * hangcheck. Hence we apply the barrier to ensure that we do not - * see a more recent value in the hws than we are tracking. - */ - request->emitted_jiffies = jiffies; - request->previous_seqno = engine->last_submitted_seqno; - smp_store_mb(engine->last_submitted_seqno, request->seqno); - list_add_tail(&request->list, &engine->request_list); - - /* Record the position of the start of the request so that - * should we detect the updated seqno part-way through the - * GPU processing the request, we never over-estimate the - * position of the head. - */ - request->postfix = intel_ring_get_tail(ringbuf); - - if (i915.enable_execlists) - ret = engine->emit_request(request); - else { - ret = engine->add_request(request); - - request->tail = intel_ring_get_tail(ringbuf); - } - /* Not allowed to fail! */ - WARN(ret, "emit|add_request failed: %d!\n", ret); - /* Sanity check that the reserved size was large enough. */ - ret = intel_ring_get_tail(ringbuf) - request_start; - if (ret < 0) - ret += ringbuf->size; - WARN_ONCE(ret > reserved_tail, - "Not enough space reserved (%d bytes) " - "for adding the request (%d bytes)\n", - reserved_tail, ret); - - i915_gem_mark_busy(engine); + i915_gem_object_put(obj); } static bool i915_context_is_banned(const struct i915_gem_context *ctx) @@ -2978,101 +2478,6 @@ static void i915_set_reset_status(struct i915_gem_context *ctx, } } -void i915_gem_request_free(struct kref *req_ref) -{ - struct drm_i915_gem_request *req = container_of(req_ref, - typeof(*req), ref); - kmem_cache_free(req->i915->requests, req); -} - -static inline int -__i915_gem_request_alloc(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - struct drm_i915_gem_request **req_out) -{ - struct drm_i915_private *dev_priv = engine->i915; - unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); - struct drm_i915_gem_request *req; - int ret; - - if (!req_out) - return -EINVAL; - - *req_out = NULL; - - /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report - * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex - * and restart. 
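// Sketch of the tail arithmetic behind the reserved-space check above: how
// many bytes were emitted between two positions of a circular ring buffer,
// accounting for wrap, which is then compared against the space reserved for
// the request. Standalone helper; ring_size is a plain parameter here.
#include <stdint.h>

static uint32_t ring_bytes_used(uint32_t start, uint32_t tail, uint32_t ring_size)
{
        if (tail >= start)
                return tail - start;

        // The tail wrapped past the end of the ring and started over.
        return ring_size - (start - tail);
}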
- */ - ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible); - if (ret) - return ret; - - req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL); - if (req == NULL) - return -ENOMEM; - - ret = i915_gem_get_seqno(engine->i915, &req->seqno); - if (ret) - goto err; - - kref_init(&req->ref); - req->i915 = dev_priv; - req->engine = engine; - req->ctx = ctx; - i915_gem_context_reference(req->ctx); - - /* - * Reserve space in the ring buffer for all the commands required to - * eventually emit this request. This is to guarantee that the - * i915_add_request() call can't fail. Note that the reserve may need - * to be redone if the request is not actually submitted straight - * away, e.g. because a GPU scheduler has deferred it. - */ - req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; - - if (i915.enable_execlists) - ret = intel_logical_ring_alloc_request_extras(req); - else - ret = intel_ring_alloc_request_extras(req); - if (ret) - goto err_ctx; - - *req_out = req; - return 0; - -err_ctx: - i915_gem_context_unreference(ctx); -err: - kmem_cache_free(dev_priv->requests, req); - return ret; -} - -/** - * i915_gem_request_alloc - allocate a request structure - * - * @engine: engine that we wish to issue the request on. - * @ctx: context that the request will be associated with. - * This can be NULL if the request is not directly related to - * any specific user context, in which case this function will - * choose an appropriate context to use. - * - * Returns a pointer to the allocated request if successful, - * or an error code if not. - */ -struct drm_i915_gem_request * -i915_gem_request_alloc(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) -{ - struct drm_i915_gem_request *req; - int err; - - if (ctx == NULL) - ctx = engine->i915->kernel_context; - err = __i915_gem_request_alloc(engine, ctx, &req); - return err ? ERR_PTR(err) : req; -} - struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine) { @@ -3086,7 +2491,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) * extra delay for a recent interrupt is pointless. Hence, we do * not need an engine->irq_seqno_barrier() before the seqno reads. */ - list_for_each_entry(request, &engine->request_list, list) { + list_for_each_entry(request, &engine->request_list, link) { if (i915_gem_request_completed(request)) continue; @@ -3108,23 +2513,20 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine) ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; i915_set_reset_status(request->ctx, ring_hung); - list_for_each_entry_continue(request, &engine->request_list, list) + list_for_each_entry_continue(request, &engine->request_list, link) i915_set_reset_status(request->ctx, false); } static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) { - struct intel_ringbuffer *buffer; - - while (!list_empty(&engine->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&engine->active_list, - struct drm_i915_gem_object, - engine_list[engine->id]); + struct drm_i915_gem_request *request; + struct intel_ring *ring; - i915_gem_object_retire__read(obj, engine->id); - } + /* Mark all pending requests as complete so that any concurrent + * (lockless) lookup doesn't try and wait upon the request as we + * reset it. 
+ */ + intel_engine_init_seqno(engine, engine->last_submitted_seqno); /* * Clear the execlists queue up before freeing the requests, as those @@ -3146,15 +2548,11 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) * implicit references on things like e.g. ppgtt address spaces through * the request. */ - while (!list_empty(&engine->request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&engine->request_list, - struct drm_i915_gem_request, - list); - - i915_gem_request_retire(request); - } + request = i915_gem_active_raw(&engine->last_request, + &engine->i915->drm.struct_mutex); + if (request) + i915_gem_request_retire_upto(request); + GEM_BUG_ON(intel_engine_is_active(engine)); /* Having flushed all requests from all queues, we know that all * ringbuffers must now be empty. However, since we do not reclaim @@ -3163,12 +2561,12 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) * upon reset is less than when we start. Do one more pass over * all the ringbuffers to reset last_retired_head. */ - list_for_each_entry(buffer, &engine->buffers, link) { - buffer->last_retired_head = buffer->tail; - intel_ring_update_space(buffer); + list_for_each_entry(ring, &engine->buffers, link) { + ring->last_retired_head = ring->tail; + intel_ring_update_space(ring); } - intel_ring_init_seqno(engine, engine->last_submitted_seqno); + engine->i915->gt.active_engines &= ~intel_engine_flag(engine); } void i915_gem_reset(struct drm_device *dev) @@ -3186,82 +2584,11 @@ void i915_gem_reset(struct drm_device *dev) for_each_engine(engine, dev_priv) i915_gem_reset_engine_cleanup(engine); + mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); i915_gem_context_reset(dev); i915_gem_restore_fences(dev); - - WARN_ON(i915_verify_lists(dev)); -} - -/** - * This function clears the request list as sequence numbers are passed. - * @engine: engine to retire requests on - */ -void -i915_gem_retire_requests_ring(struct intel_engine_cs *engine) -{ - WARN_ON(i915_verify_lists(engine->dev)); - - /* Retire requests first as we use it above for the early return. - * If we retire requests last, we may use a later seqno and so clear - * the requests lists without clearing the active list, leading to - * confusion. - */ - while (!list_empty(&engine->request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&engine->request_list, - struct drm_i915_gem_request, - list); - - if (!i915_gem_request_completed(request)) - break; - - i915_gem_request_retire(request); - } - - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate, - * before we free the context associated with the requests. 
- */ - while (!list_empty(&engine->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&engine->active_list, - struct drm_i915_gem_object, - engine_list[engine->id]); - - if (!list_empty(&obj->last_read_req[engine->id]->list)) - break; - - i915_gem_object_retire__read(obj, engine->id); - } - - WARN_ON(i915_verify_lists(engine->dev)); -} - -void i915_gem_retire_requests(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - if (dev_priv->gt.active_engines == 0) - return; - - GEM_BUG_ON(!dev_priv->gt.awake); - - for_each_engine(engine, dev_priv) { - i915_gem_retire_requests_ring(engine); - if (list_empty(&engine->request_list)) - dev_priv->gt.active_engines &= ~intel_engine_flag(engine); - } - - if (dev_priv->gt.active_engines == 0) - queue_delayed_work(dev_priv->wq, - &dev_priv->gt.idle_work, - msecs_to_jiffies(100)); } static void @@ -3281,10 +2608,12 @@ i915_gem_retire_work_handler(struct work_struct *work) * We do not need to do this test under locking as in the worst-case * we queue the retire worker once too often. */ - if (READ_ONCE(dev_priv->gt.awake)) + if (READ_ONCE(dev_priv->gt.awake)) { + i915_queue_hangcheck(dev_priv); queue_delayed_work(dev_priv->wq, &dev_priv->gt.retire_work, round_jiffies_up_relative(HZ)); + } } static void @@ -3294,7 +2623,6 @@ i915_gem_idle_work_handler(struct work_struct *work) container_of(work, typeof(*dev_priv), gt.idle_work.work); struct drm_device *dev = &dev_priv->drm; struct intel_engine_cs *engine; - unsigned int stuck_engines; bool rearm_hangcheck; if (!READ_ONCE(dev_priv->gt.awake)) @@ -3324,12 +2652,6 @@ i915_gem_idle_work_handler(struct work_struct *work) dev_priv->gt.awake = false; rearm_hangcheck = false; - stuck_engines = intel_kick_waiters(dev_priv); - if (unlikely(stuck_engines)) { - DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n"); - dev_priv->gpu_error.missed_irq_rings |= stuck_engines; - } - if (INTEL_GEN(dev_priv) >= 6) gen6_rps_idle(dev_priv); intel_runtime_pm_put(dev_priv); @@ -3343,32 +2665,17 @@ out_rearm: } } -/** - * Ensures that an object will eventually get non-busy by flushing any required - * write domains, emitting any outstanding lazy request and retiring and - * completed requests. 
- * @obj: object to flush - */ -static int -i915_gem_object_flush_active(struct drm_i915_gem_object *obj) +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) { - int i; - - if (!obj->active) - return 0; - - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct drm_i915_gem_request *req; - - req = obj->last_read_req[i]; - if (req == NULL) - continue; - - if (i915_gem_request_completed(req)) - i915_gem_object_retire__read(obj, i); - } + struct drm_i915_gem_object *obj = to_intel_bo(gem); + struct drm_i915_file_private *fpriv = file->driver_priv; + struct i915_vma *vma, *vn; - return 0; + mutex_lock(&obj->base.dev->struct_mutex); + list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) + if (vma->vm->file == fpriv) + i915_vma_close(vma); + mutex_unlock(&obj->base.dev->struct_mutex); } /** @@ -3399,122 +2706,58 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_wait *args = data; + struct intel_rps_client *rps = to_rps_client(file); struct drm_i915_gem_object *obj; - struct drm_i915_gem_request *req[I915_NUM_ENGINES]; - int i, n = 0; - int ret; + unsigned long active; + int idx, ret = 0; if (args->flags != 0) return -EINVAL; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle)); - if (&obj->base == NULL) { - mutex_unlock(&dev->struct_mutex); + obj = i915_gem_object_lookup(file, args->bo_handle); + if (!obj) return -ENOENT; - } - - /* Need to make sure the object gets inactive eventually. */ - ret = i915_gem_object_flush_active(obj); - if (ret) - goto out; - - if (!obj->active) - goto out; - - /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a timeout == 0 (like busy ioctl) - */ - if (args->timeout_ns == 0) { - ret = -ETIME; - goto out; - } - - drm_gem_object_unreference(&obj->base); - - for (i = 0; i < I915_NUM_ENGINES; i++) { - if (obj->last_read_req[i] == NULL) - continue; - - req[n++] = i915_gem_request_reference(obj->last_read_req[i]); - } - - mutex_unlock(&dev->struct_mutex); - for (i = 0; i < n; i++) { - if (ret == 0) - ret = __i915_wait_request(req[i], true, - args->timeout_ns > 0 ? &args->timeout_ns : NULL, - to_rps_client(file)); - i915_gem_request_unreference(req[i]); + active = __I915_BO_ACTIVE(obj); + for_each_active(active, idx) { + s64 *timeout = args->timeout_ns >= 0 ? 
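// Sketch of the for_each_active() walk used by the wait ioctl above: the
// object keeps one active bit per engine, and the waiter handles each set bit
// in turn. Plain C bit-walk using the GCC/Clang __builtin_ctzl; the callback
// shape is invented for this illustration.
static void wait_each_active(unsigned long active_mask,
                             void (*wait_one)(int engine_idx, void *data),
                             void *data)
{
        while (active_mask) {
                int idx = __builtin_ctzl(active_mask);  // lowest set bit

                wait_one(idx, data);
                active_mask &= active_mask - 1;         // clear that bit
        }
}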
&args->timeout_ns : NULL; + ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true, + timeout, rps); + if (ret) + break; } - return ret; -out: - drm_gem_object_unreference(&obj->base); - mutex_unlock(&dev->struct_mutex); + i915_gem_object_put_unlocked(obj); return ret; } static int -__i915_gem_object_sync(struct drm_i915_gem_object *obj, - struct intel_engine_cs *to, - struct drm_i915_gem_request *from_req, - struct drm_i915_gem_request **to_req) +__i915_gem_object_sync(struct drm_i915_gem_request *to, + struct drm_i915_gem_request *from) { - struct intel_engine_cs *from; int ret; - from = i915_gem_request_get_engine(from_req); - if (to == from) + if (to->engine == from->engine) return 0; - if (i915_gem_request_completed(from_req)) - return 0; - - if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - ret = __i915_wait_request(from_req, - i915->mm.interruptible, - NULL, - &i915->rps.semaphores); + if (!i915.semaphores) { + ret = i915_wait_request(from, + from->i915->mm.interruptible, + NULL, + NO_WAITBOOST); if (ret) return ret; - - i915_gem_object_retire_request(obj, from_req); } else { - int idx = intel_ring_sync_index(from, to); - u32 seqno = i915_gem_request_get_seqno(from_req); - - WARN_ON(!to_req); - - if (seqno <= from->semaphore.sync_seqno[idx]) + int idx = intel_engine_sync_index(from->engine, to->engine); + if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx]) return 0; - if (*to_req == NULL) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(to, NULL); - if (IS_ERR(req)) - return PTR_ERR(req); - - *to_req = req; - } - - trace_i915_gem_ring_sync_to(*to_req, from, from_req); - ret = to->semaphore.sync_to(*to_req, from, seqno); + trace_i915_gem_ring_sync_to(to, from); + ret = to->engine->semaphore.sync_to(to, from); if (ret) return ret; - /* We use last_read_req because sync_to() - * might have just caused seqno wrap under - * the radar. - */ - from->semaphore.sync_seqno[idx] = - i915_gem_request_get_seqno(obj->last_read_req[from->id]); + from->engine->semaphore.sync_seqno[idx] = from->fence.seqno; } return 0; @@ -3524,17 +2767,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, * i915_gem_object_sync - sync an object to a ring. * * @obj: object which may be in use on another ring. - * @to: ring we wish to use the object on. May be NULL. - * @to_req: request we wish to use the object for. See below. - * This will be allocated and returned if a request is - * required but not passed in. + * @to: request we are wishing to use * * This code is meant to abstract object synchronization with the GPU. - * Calling with NULL implies synchronizing the object with the CPU - * rather than a particular GPU ring. Conceptually we serialise writes - * between engines inside the GPU. We only allow one engine to write - * into a buffer at any time, but multiple readers. To ensure each has - * a coherent view of memory, we must: + * Conceptually we serialise writes between engines inside the GPU. + * We only allow one engine to write into a buffer at any time, but + * multiple readers. To ensure each has a coherent view of memory, we must: * * - If there is an outstanding write request to the object, the new * request must wait for it to complete (either CPU or in hw, requests @@ -3543,44 +2781,39 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, * - If we are a write request (pending_write_domain is set), the new * request must wait for outstanding read requests to complete. 
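Both the wait ioctl above and the sync path below walk a per-engine active bitmask with for_each_active(), which is defined elsewhere in the series. A minimal sketch of the shape such a helper takes, assuming only that the mask carries one bit per engine index; this is an illustrative stand-in, not the macro from the patch:

    /* Visit each set bit of 'mask' in turn, clearing it as we go, so the
     * loop body runs once per engine that still has a tracked request. */
    #define for_each_active(mask, idx) \
            for (; (mask) ? ((idx) = __builtin_ctzl(mask)), 1 : 0; \
                 (mask) &= ~(1UL << (idx)))

With active = __I915_BO_ACTIVE(obj), as in i915_gem_wait_ioctl(), the loop then only touches the last_read slots that can still be busy.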
* - * For CPU synchronisation (NULL to) no request is required. For syncing with - * rings to_req must be non-NULL. However, a request does not have to be - * pre-allocated. If *to_req is NULL and sync commands will be emitted then a - * request will be allocated automatically and returned through *to_req. Note - * that it is not guaranteed that commands will be emitted (because the system - * might already be idle). Hence there is no need to create a request that - * might never have any work submitted. Note further that if a request is - * returned in *to_req, it is the responsibility of the caller to submit - * that request (after potentially adding more work to it). - * * Returns 0 if successful, else propagates up the lower layer error. */ int i915_gem_object_sync(struct drm_i915_gem_object *obj, - struct intel_engine_cs *to, - struct drm_i915_gem_request **to_req) + struct drm_i915_gem_request *to) { - const bool readonly = obj->base.pending_write_domain == 0; - struct drm_i915_gem_request *req[I915_NUM_ENGINES]; - int ret, i, n; + struct i915_gem_active *active; + unsigned long active_mask; + int idx; - if (!obj->active) - return 0; + lockdep_assert_held(&obj->base.dev->struct_mutex); - if (to == NULL) - return i915_gem_object_wait_rendering(obj, readonly); + active_mask = i915_gem_object_get_active(obj); + if (!active_mask) + return 0; - n = 0; - if (readonly) { - if (obj->last_write_req) - req[n++] = obj->last_write_req; + if (obj->base.pending_write_domain) { + active = obj->last_read; } else { - for (i = 0; i < I915_NUM_ENGINES; i++) - if (obj->last_read_req[i]) - req[n++] = obj->last_read_req[i]; + active_mask = 1; + active = &obj->last_write; } - for (i = 0; i < n; i++) { - ret = __i915_gem_object_sync(obj, to, req[i], to_req); + + for_each_active(active_mask, idx) { + struct drm_i915_gem_request *request; + int ret; + + request = i915_gem_active_peek(&active[idx], + &obj->base.dev->struct_mutex); + if (!request) + continue; + + ret = __i915_gem_object_sync(to, request); if (ret) return ret; } @@ -3588,30 +2821,9 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, return 0; } -static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) -{ - u32 old_write_domain, old_read_domains; - - /* Force a pagefault for domain tracking on next user access */ - i915_gem_release_mmap(obj); - - if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) - return; - - old_read_domains = obj->base.read_domains; - old_write_domain = obj->base.write_domain; - - obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT; - obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT; - - trace_i915_gem_object_change_domain(obj, - old_read_domains, - old_write_domain); -} - static void __i915_vma_iounmap(struct i915_vma *vma) { - GEM_BUG_ON(vma->pin_count); + GEM_BUG_ON(i915_vma_is_pinned(vma)); if (vma->iomap == NULL) return; @@ -3620,65 +2832,83 @@ static void __i915_vma_iounmap(struct i915_vma *vma) vma->iomap = NULL; } -static int __i915_vma_unbind(struct i915_vma *vma, bool wait) +int i915_vma_unbind(struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + unsigned long active; int ret; - if (list_empty(&vma->obj_link)) - return 0; - - if (!drm_mm_node_allocated(&vma->node)) { - i915_gem_vma_destroy(vma); - return 0; - } - - if (vma->pin_count) - return -EBUSY; + /* First wait upon any activity as retiring the request may + * have side-effects such as unpinning or even unbinding this vma. 
+ */ + active = i915_vma_get_active(vma); + if (active) { + int idx; + + /* When a closed VMA is retired, it is unbound - eek. + * In order to prevent it from being recursively closed, + * take a pin on the vma so that the second unbind is + * aborted. + */ + __i915_vma_pin(vma); - BUG_ON(obj->pages == NULL); + for_each_active(active, idx) { + ret = i915_gem_active_retire(&vma->last_read[idx], + &vma->vm->dev->struct_mutex); + if (ret) + break; + } - if (wait) { - ret = i915_gem_object_wait_rendering(obj, false); + __i915_vma_unpin(vma); if (ret) return ret; + + GEM_BUG_ON(i915_vma_is_active(vma)); } - if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { - i915_gem_object_finish_gtt(obj); + if (i915_vma_is_pinned(vma)) + return -EBUSY; + + if (!drm_mm_node_allocated(&vma->node)) + goto destroy; + + GEM_BUG_ON(obj->bind_count == 0); + GEM_BUG_ON(!obj->pages); + if (i915_vma_is_map_and_fenceable(vma)) { /* release the fence reg _after_ flushing */ - ret = i915_gem_object_put_fence(obj); + ret = i915_vma_put_fence(vma); if (ret) return ret; + /* Force a pagefault for domain tracking on next user access */ + i915_gem_release_mmap(obj); + __i915_vma_iounmap(vma); + vma->flags &= ~I915_VMA_CAN_FENCE; } - trace_i915_vma_unbind(vma); - - vma->vm->unbind_vma(vma); - vma->bound = 0; - - list_del_init(&vma->vm_link); - if (vma->is_ggtt) { - if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { - obj->map_and_fenceable = false; - } else if (vma->ggtt_view.pages) { - sg_free_table(vma->ggtt_view.pages); - kfree(vma->ggtt_view.pages); - } - vma->ggtt_view.pages = NULL; + if (likely(!vma->vm->closed)) { + trace_i915_vma_unbind(vma); + vma->vm->unbind_vma(vma); } + vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); drm_mm_remove_node(&vma->node); - i915_gem_vma_destroy(vma); + list_move_tail(&vma->vm_link, &vma->vm->unbound_list); + + if (vma->pages != obj->pages) { + GEM_BUG_ON(!vma->pages); + sg_free_table(vma->pages); + kfree(vma->pages); + } + vma->pages = NULL; /* Since the unbound list is global, only move to that list if * no more VMAs exist. */ - if (list_empty(&obj->vma_list)) - list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); + if (--obj->bind_count == 0) + list_move_tail(&obj->global_list, + &to_i915(obj->base.dev)->mm.unbound_list); /* And finally now the object is completely decoupled from this vma, * we can drop its hold on the backing storage and allow it to be @@ -3686,36 +2916,28 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) */ i915_gem_object_unpin_pages(obj); - return 0; -} - -int i915_vma_unbind(struct i915_vma *vma) -{ - return __i915_vma_unbind(vma, true); -} +destroy: + if (unlikely(i915_vma_is_closed(vma))) + i915_vma_destroy(vma); -int __i915_vma_unbind_no_wait(struct i915_vma *vma) -{ - return __i915_vma_unbind(vma, false); + return 0; } -int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv) +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, + bool interruptible) { struct intel_engine_cs *engine; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); - for_each_engine(engine, dev_priv) { if (engine->last_context == NULL) continue; - ret = intel_engine_idle(engine); + ret = intel_engine_idle(engine, interruptible); if (ret) return ret; } - WARN_ON(i915_verify_lists(dev)); return 0; } @@ -3753,128 +2975,87 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, } /** - * Finds free space in the GTT aperture and binds the object or a view of it - * there. 
- * @obj: object to bind - * @vm: address space to bind into - * @ggtt_view: global gtt view if applicable - * @alignment: requested alignment + * i915_vma_insert - finds a slot for the vma in its address space + * @vma: the vma + * @size: requested size in bytes (can be larger than the VMA) + * @alignment: required alignment * @flags: mask of PIN_* flags to use + * + * First we try to allocate some free space that meets the requirements for + * the VMA. Failiing that, if the flags permit, it will evict an old VMA, + * preferrably the oldest idle entry to make room for the new VMA. + * + * Returns: + * 0 on success, negative error code otherwise. */ -static struct i915_vma * -i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *ggtt_view, - unsigned alignment, - uint64_t flags) +static int +i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - u32 fence_alignment, unfenced_alignment; - u32 search_flag, alloc_flag; + struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); + struct drm_i915_gem_object *obj = vma->obj; u64 start, end; - u64 size, fence_size; - struct i915_vma *vma; int ret; - if (i915_is_ggtt(vm)) { - u32 view_size; - - if (WARN_ON(!ggtt_view)) - return ERR_PTR(-EINVAL); - - view_size = i915_ggtt_view_size(obj, ggtt_view); - - fence_size = i915_gem_get_gtt_size(dev, - view_size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(dev, - view_size, - obj->tiling_mode, - true); - unfenced_alignment = i915_gem_get_gtt_alignment(dev, - view_size, - obj->tiling_mode, - false); - size = flags & PIN_MAPPABLE ? fence_size : view_size; - } else { - fence_size = i915_gem_get_gtt_size(dev, - obj->base.size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(dev, - obj->base.size, - obj->tiling_mode, - true); - unfenced_alignment = - i915_gem_get_gtt_alignment(dev, - obj->base.size, - obj->tiling_mode, - false); - size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; - } + GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + + size = max(size, vma->size); + if (flags & PIN_MAPPABLE) + size = i915_gem_get_ggtt_size(dev_priv, size, + i915_gem_object_get_tiling(obj)); + + alignment = max(max(alignment, vma->display_alignment), + i915_gem_get_ggtt_alignment(dev_priv, size, + i915_gem_object_get_tiling(obj), + flags & PIN_MAPPABLE)); start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; - end = vm->total; + + end = vma->vm->total; if (flags & PIN_MAPPABLE) - end = min_t(u64, end, ggtt->mappable_end); + end = min_t(u64, end, dev_priv->ggtt.mappable_end); if (flags & PIN_ZONE_4G) end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE); - if (alignment == 0) - alignment = flags & PIN_MAPPABLE ? fence_alignment : - unfenced_alignment; - if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) { - DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n", - ggtt_view ? ggtt_view->type : 0, - alignment); - return ERR_PTR(-EINVAL); - } - /* If binding the object/GGTT view requires more space than the entire * aperture has, reject it early before evicting everything in a vain * attempt to find space. */ if (size > end) { - DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n", - ggtt_view ? 
ggtt_view->type : 0, - size, + DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n", + size, obj->base.size, flags & PIN_MAPPABLE ? "mappable" : "total", end); - return ERR_PTR(-E2BIG); + return -E2BIG; } ret = i915_gem_object_get_pages(obj); if (ret) - return ERR_PTR(ret); + return ret; i915_gem_object_pin_pages(obj); - vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) : - i915_gem_obj_lookup_or_create_vma(obj, vm); - - if (IS_ERR(vma)) - goto err_unpin; - if (flags & PIN_OFFSET_FIXED) { - uint64_t offset = flags & PIN_OFFSET_MASK; - - if (offset & (alignment - 1) || offset + size > end) { + u64 offset = flags & PIN_OFFSET_MASK; + if (offset & (alignment - 1) || offset > end - size) { ret = -EINVAL; - goto err_free_vma; + goto err_unpin; } + vma->node.start = offset; vma->node.size = size; vma->node.color = obj->cache_level; - ret = drm_mm_reserve_node(&vm->mm, &vma->node); + ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); if (ret) { ret = i915_gem_evict_for_vma(vma); if (ret == 0) - ret = drm_mm_reserve_node(&vm->mm, &vma->node); + ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); + if (ret) + goto err_unpin; } - if (ret) - goto err_free_vma; } else { + u32 search_flag, alloc_flag; + if (flags & PIN_HIGH) { search_flag = DRM_MM_SEARCH_BELOW; alloc_flag = DRM_MM_CREATE_TOP; @@ -3883,47 +3064,45 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, alloc_flag = DRM_MM_CREATE_DEFAULT; } + /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, + * so we know that we always have a minimum alignment of 4096. + * The drm_mm range manager is optimised to return results + * with zero alignment, so where possible use the optimal + * path. + */ + if (alignment <= 4096) + alignment = 0; + search_free: - ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, + ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm, + &vma->node, size, alignment, obj->cache_level, start, end, search_flag, alloc_flag); if (ret) { - ret = i915_gem_evict_something(dev, vm, size, alignment, + ret = i915_gem_evict_something(vma->vm, size, alignment, obj->cache_level, start, end, flags); if (ret == 0) goto search_free; - goto err_free_vma; + goto err_unpin; } } - if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) { - ret = -EINVAL; - goto err_remove_node; - } - - trace_i915_vma_bind(vma, flags); - ret = i915_vma_bind(vma, obj->cache_level, flags); - if (ret) - goto err_remove_node; + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&vma->vm_link, &vm->inactive_list); + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + obj->bind_count++; - return vma; + return 0; -err_remove_node: - drm_mm_remove_node(&vma->node); -err_free_vma: - i915_gem_vma_destroy(vma); - vma = ERR_PTR(ret); err_unpin: i915_gem_object_unpin_pages(obj); - return vma; + return ret; } bool @@ -3968,51 +3147,72 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj, static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { - uint32_t old_write_domain; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; /* No actual flushing is required for the GTT write domain. Writes - * to it immediately go to main memory as far as we know, so there's + * to it "immediately" go to main memory as far as we know, so there's * no chipset flush. 
It also doesn't land in render cache. * * However, we do have to enforce the order so that all writes through * the GTT land before any writes to the device, such as updates to * the GATT itself. + * + * We also have to wait a bit for the writes to land from the GTT. + * An uncached read (i.e. mmio) seems to be ideal for the round-trip + * timing. This issue has only been observed when switching quickly + * between GTT writes and CPU reads from inside the kernel on recent hw, + * and it appears to only affect discrete GTT blocks (i.e. on LLC + * system agents we cannot reproduce this behaviour). */ wmb(); + if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) + POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base)); - old_write_domain = obj->base.write_domain; - obj->base.write_domain = 0; - - intel_fb_obj_flush(obj, false, ORIGIN_GTT); + intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT)); + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, obj->base.read_domains, - old_write_domain); + I915_GEM_DOMAIN_GTT); } /** Flushes the CPU write domain for the object if it's dirty. */ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { - uint32_t old_write_domain; - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; if (i915_gem_clflush_object(obj, obj->pin_display)) i915_gem_chipset_flush(to_i915(obj->base.dev)); - old_write_domain = obj->base.write_domain; - obj->base.write_domain = 0; - intel_fb_obj_flush(obj, false, ORIGIN_CPU); + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, obj->base.read_domains, - old_write_domain); + I915_GEM_DOMAIN_CPU); +} + +static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!i915_vma_is_ggtt(vma)) + continue; + + if (i915_vma_is_active(vma)) + continue; + + if (!drm_mm_node_allocated(&vma->node)) + continue; + + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + } } /** @@ -4026,20 +3226,16 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; uint32_t old_write_domain, old_read_domains; - struct i915_vma *vma; int ret; - if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) - return 0; - ret = i915_gem_object_wait_rendering(obj, !write); if (ret) return ret; + if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) + return 0; + /* Flush and acquire obj->pages so that we are coherent through * direct access in memory with previous cached writes through * shmemfs and that our cache domain tracking remains valid. 
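The hunk above pairs wmb() with an uncached register read so that writes through the GTT have actually landed before the device is told about them. Reduced to a standalone sketch of the posting-read idiom; the register argument is a placeholder, any harmless MMIO register will do:

    static void flush_wc_writes(void __iomem *scratch_reg)
    {
            wmb();                     /* order the preceding WC/GTT writes */
            (void)readl(scratch_reg);  /* uncached read forces them to complete */
    }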
@@ -4080,10 +3276,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) old_write_domain); /* And bump the LRU for this access */ - vma = i915_gem_obj_to_ggtt(obj); - if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) - list_move_tail(&vma->vm_link, - &ggtt->base.inactive_list); + i915_gem_object_bump_inactive_ggtt(obj); return 0; } @@ -4106,9 +3299,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { - struct drm_device *dev = obj->base.dev; - struct i915_vma *vma, *next; - bool bound = false; + struct i915_vma *vma; int ret = 0; if (obj->cache_level == cache_level) @@ -4119,21 +3310,28 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * catch the issue of the CS prefetch crossing page boundaries and * reading an invalid PTE on older architectures. */ - list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { +restart: + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; - if (vma->pin_count) { + if (i915_vma_is_pinned(vma)) { DRM_DEBUG("can not change the cache level of pinned objects\n"); return -EBUSY; } - if (!i915_gem_valid_gtt_space(vma, cache_level)) { - ret = i915_vma_unbind(vma); - if (ret) - return ret; - } else - bound = true; + if (i915_gem_valid_gtt_space(vma, cache_level)) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + + /* As unbinding may affect other elements in the + * obj->vma_list (due to side-effects from retiring + * an active vma), play safe and restart the iterator. + */ + goto restart; } /* We can reuse the existing drm_mm nodes but need to change the @@ -4143,7 +3341,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * rewrite the PTE in the belief that doing so tramples upon less * state and so involves less work. */ - if (bound) { + if (obj->bind_count) { /* Before we change the PTE, the GPU must not be accessing it. * If we wait upon the object, we know that all the bound * VMA are no longer active. @@ -4152,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (ret) return ret; - if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) { + if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) { /* Access to snoopable pages through the GTT is * incoherent and on some machines causes a hard * lockup. Relinquish the CPU mmaping to force @@ -4169,9 +3367,11 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * dropped the fence as all snoopable access is * supposed to be linear. 
*/ - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; + list_for_each_entry(vma, &obj->vma_list, obj_link) { + ret = i915_vma_put_fence(vma); + if (ret) + return ret; + } } else { /* We either have incoherent backing store and * so no GTT access or the architecture is fully @@ -4215,8 +3415,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_caching *args = data; struct drm_i915_gem_object *obj; - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) return -ENOENT; switch (obj->cache_level) { @@ -4234,7 +3434,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, break; } - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); return 0; } @@ -4276,15 +3476,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, if (ret) goto rpm_put; - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) { ret = -ENOENT; goto unlock; } ret = i915_gem_object_set_cache_level(obj, level); - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); unlock: mutex_unlock(&dev->struct_mutex); rpm_put: @@ -4298,11 +3498,12 @@ rpm_put: * Can be called from an uninterruptible phase (modesetting) and allows * any flushes to be pipelined (for pageflips). */ -int +struct i915_vma * i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view) { + struct i915_vma *vma; u32 old_read_domains, old_write_domain; int ret; @@ -4322,19 +3523,31 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, */ ret = i915_gem_object_set_cache_level(obj, HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); - if (ret) + if (ret) { + vma = ERR_PTR(ret); goto err_unpin_display; + } /* As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we - * always use map_and_fenceable for all scanout buffers. + * always use map_and_fenceable for all scanout buffers. However, + * it may simply be too big to fit into mappable, in which case + * put it anyway and hope that userspace can cope (but always first + * try to preserve the existing ABI). */ - ret = i915_gem_object_ggtt_pin(obj, view, alignment, - view->type == I915_GGTT_VIEW_NORMAL ? 
- PIN_MAPPABLE : 0); - if (ret) + vma = ERR_PTR(-ENOSPC); + if (view->type == I915_GGTT_VIEW_NORMAL) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, + PIN_MAPPABLE | PIN_NONBLOCK); + if (IS_ERR(vma)) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); + if (IS_ERR(vma)) goto err_unpin_display; + vma->display_alignment = max_t(u64, vma->display_alignment, alignment); + + WARN_ON(obj->pin_display > i915_vma_pin_count(vma)); + i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -4350,23 +3563,28 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, old_read_domains, old_write_domain); - return 0; + return vma; err_unpin_display: obj->pin_display--; - return ret; + return vma; } void -i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) +i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - if (WARN_ON(obj->pin_display == 0)) + if (WARN_ON(vma->obj->pin_display == 0)) return; - i915_gem_object_ggtt_unpin_view(obj, view); + if (--vma->obj->pin_display == 0) + vma->display_alignment = 0; - obj->pin_display--; + /* Bump the LRU to try and avoid premature eviction whilst flipping */ + if (!i915_vma_is_active(vma)) + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + + i915_vma_unpin(vma); + WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma)); } /** @@ -4383,13 +3601,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) uint32_t old_write_domain, old_read_domains; int ret; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) - return 0; - ret = i915_gem_object_wait_rendering(obj, !write); if (ret) return ret; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) + return 0; + i915_gem_object_flush_gtt_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -4464,28 +3682,31 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) target = request; } if (target) - i915_gem_request_reference(target); + i915_gem_request_get(target); spin_unlock(&file_priv->mm.lock); if (target == NULL) return 0; - ret = __i915_wait_request(target, true, NULL, NULL); - i915_gem_request_unreference(target); + ret = i915_wait_request(target, true, NULL, NULL); + i915_gem_request_put(target); return ret; } static bool -i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) +i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - struct drm_i915_gem_object *obj = vma->obj; + if (!drm_mm_node_allocated(&vma->node)) + return false; + + if (vma->node.size < size) + return true; - if (alignment && - vma->node.start & (alignment - 1)) + if (alignment && vma->node.start & (alignment - 1)) return true; - if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) + if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma)) return true; if (flags & PIN_OFFSET_BIAS && @@ -4502,135 +3723,208 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); bool mappable, fenceable; u32 fence_size, fence_alignment; - fence_size = i915_gem_get_gtt_size(obj->base.dev, - obj->base.size, - obj->tiling_mode); - fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, - obj->base.size, - obj->tiling_mode, - true); + fence_size = i915_gem_get_ggtt_size(dev_priv, + vma->size, + 
i915_gem_object_get_tiling(obj)); + fence_alignment = i915_gem_get_ggtt_alignment(dev_priv, + vma->size, + i915_gem_object_get_tiling(obj), + true); fenceable = (vma->node.size == fence_size && (vma->node.start & (fence_alignment - 1)) == 0); mappable = (vma->node.start + fence_size <= - to_i915(obj->base.dev)->ggtt.mappable_end); + dev_priv->ggtt.mappable_end); - obj->map_and_fenceable = mappable && fenceable; + if (mappable && fenceable) + vma->flags |= I915_VMA_CAN_FENCE; + else + vma->flags &= ~I915_VMA_CAN_FENCE; } -static int -i915_gem_object_do_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *ggtt_view, - uint32_t alignment, - uint64_t flags) +int __i915_vma_do_pin(struct i915_vma *vma, + u64 size, u64 alignment, u64 flags) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_vma *vma; - unsigned bound; + unsigned int bound = vma->flags; int ret; - if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) - return -ENODEV; + GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); + GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); - if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) - return -EINVAL; + if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { + ret = -EBUSY; + goto err; + } - if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) - return -EINVAL; + if ((bound & I915_VMA_BIND_MASK) == 0) { + ret = i915_vma_insert(vma, size, alignment, flags); + if (ret) + goto err; + } - if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) - return -EINVAL; + ret = i915_vma_bind(vma, vma->obj->cache_level, flags); + if (ret) + goto err; - vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : - i915_gem_obj_to_vma(obj, vm); + if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) + __i915_vma_set_map_and_fenceable(vma); - if (vma) { - if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) - return -EBUSY; + GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); + return 0; - if (i915_vma_misplaced(vma, alignment, flags)) { - WARN(vma->pin_count, - "bo is already pinned in %s with incorrect alignment:" - " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d," - " obj->map_and_fenceable=%d\n", - ggtt_view ? "ggtt" : "ppgtt", - upper_32_bits(vma->node.start), - lower_32_bits(vma->node.start), - alignment, - !!(flags & PIN_MAPPABLE), - obj->map_and_fenceable); - ret = i915_vma_unbind(vma); - if (ret) - return ret; +err: + __i915_vma_unpin(vma); + return ret; +} - vma = NULL; - } - } +struct i915_vma * +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + u64 size, + u64 alignment, + u64 flags) +{ + struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base; + struct i915_vma *vma; + int ret; - bound = vma ? 
vma->bound : 0; - if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { - vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, - flags); - if (IS_ERR(vma)) - return PTR_ERR(vma); - } else { - ret = i915_vma_bind(vma, obj->cache_level, flags); + vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view); + if (IS_ERR(vma)) + return vma; + + if (i915_vma_misplaced(vma, size, alignment, flags)) { + if (flags & PIN_NONBLOCK && + (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) + return ERR_PTR(-ENOSPC); + + WARN(i915_vma_is_pinned(vma), + "bo is already pinned in ggtt with incorrect alignment:" + " offset=%08x, req.alignment=%llx," + " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", + i915_ggtt_offset(vma), alignment, + !!(flags & PIN_MAPPABLE), + i915_vma_is_map_and_fenceable(vma)); + ret = i915_vma_unbind(vma); if (ret) - return ret; + return ERR_PTR(ret); } - if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && - (bound ^ vma->bound) & GLOBAL_BIND) { - __i915_vma_set_map_and_fenceable(vma); - WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); - } + ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); + if (ret) + return ERR_PTR(ret); - vma->pin_count++; - return 0; + return vma; } -int -i915_gem_object_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags) +static __always_inline unsigned int __busy_read_flag(unsigned int id) { - return i915_gem_object_do_pin(obj, vm, - i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL, - alignment, flags); + /* Note that we could alias engines in the execbuf API, but + * that would be very unwise as it prevents userspace from + * fine control over engine selection. Ahem. + * + * This should be something like EXEC_MAX_ENGINE instead of + * I915_NUM_ENGINES. + */ + BUILD_BUG_ON(I915_NUM_ENGINES > 16); + return 0x10000 << id; } -int -i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view, - uint32_t alignment, - uint64_t flags) +static __always_inline unsigned int __busy_write_id(unsigned int id) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + /* The uABI guarantees an active writer is also amongst the read + * engines. This would be true if we accessed the activity tracking + * under the lock, but as we perform the lookup of the object and + * its activity locklessly we can not guarantee that the last_write + * being active implies that we have set the same engine flag from + * last_read - hence we always set both read and write busy for + * last_write. + */ + return id | __busy_read_flag(id); +} - BUG_ON(!view); +static __always_inline unsigned int +__busy_set_if_active(const struct i915_gem_active *active, + unsigned int (*flag)(unsigned int id)) +{ + struct drm_i915_gem_request *request; + + request = rcu_dereference(active->request); + if (!request || i915_gem_request_completed(request)) + return 0; - return i915_gem_object_do_pin(obj, &ggtt->base, view, - alignment, flags | PIN_GLOBAL); + /* This is racy. See __i915_gem_active_get_rcu() for an in detail + * discussion of how to handle the race correctly, but for reporting + * the busy state we err on the side of potentially reporting the + * wrong engine as being busy (but we guarantee that the result + * is at least self-consistent). 
+ * + * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated + * whilst we are inspecting it, even under the RCU read lock as we are. + * This means that there is a small window for the engine and/or the + * seqno to have been overwritten. The seqno will always be in the + * future compared to the intended, and so we know that if that + * seqno is idle (on whatever engine) our request is idle and the + * return 0 above is correct. + * + * The issue is that if the engine is switched, it is just as likely + * to report that it is busy (but since the switch happened, we know + * the request should be idle). So there is a small chance that a busy + * result is actually the wrong engine. + * + * So why don't we care? + * + * For starters, the busy ioctl is a heuristic that is by definition + * racy. Even with perfect serialisation in the driver, the hardware + * state is constantly advancing - the state we report to the user + * is stale. + * + * The critical information for the busy-ioctl is whether the object + * is idle as userspace relies on that to detect whether its next + * access will stall, or if it has missed submitting commands to + * the hardware allowing the GPU to stall. We never generate a + * false-positive for idleness, thus busy-ioctl is reliable at the + * most fundamental level, and we maintain the guarantee that a + * busy object left to itself will eventually become idle (and stay + * idle!). + * + * We allow ourselves the leeway of potentially misreporting the busy + * state because that is an optimisation heuristic that is constantly + * in flux. Being quickly able to detect the busy/idle state is much + * more important than accurate logging of exactly which engines were + * busy. + * + * For accuracy in reporting the engine, we could use + * + * result = 0; + * request = __i915_gem_active_get_rcu(active); + * if (request) { + * if (!i915_gem_request_completed(request)) + * result = flag(request->engine->exec_id); + * i915_gem_request_put(request); + * } + * + * but that still remains susceptible to both hardware and userspace + * races. So we accept making the result of that race slightly worse, + * given the rarity of the race and its low impact on the result. + */ + return flag(READ_ONCE(request->engine->exec_id)); } -void -i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) +static __always_inline unsigned int +busy_check_reader(const struct i915_gem_active *active) { - struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); - - WARN_ON(vma->pin_count == 0); - WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); + return __busy_set_if_active(active, __busy_read_flag); +} - --vma->pin_count; +static __always_inline unsigned int +busy_check_writer(const struct i915_gem_active *active) +{ + return __busy_set_if_active(active, __busy_write_id); } int @@ -4639,47 +3933,64 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_busy *args = data; struct drm_i915_gem_object *obj; - int ret; + unsigned long active; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) { - ret = -ENOENT; - goto unlock; - } + args->busy = 0; + active = __I915_BO_ACTIVE(obj); + if (active) { + int idx; - /* Count all active objects as busy, even if they are currently not used - * by the gpu. 
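The SLAB_DESTROY_BY_RCU point above is the crux: a freed request's memory may be reused for a new request without waiting for a grace period, so a lockless reader must take a reference and then re-check that the tracker still points at the request it pinned. A hedged sketch of that acquire-and-revalidate loop, approximating what __i915_gem_active_get_rcu() does; the refcount field name is an assumption drawn from the embedded struct fence:

    static struct drm_i915_gem_request *
    active_get_rcu_sketch(const struct i915_gem_active *active)
    {
            struct drm_i915_gem_request *rq;

            rcu_read_lock();
            do {
                    rq = rcu_dereference(active->request);
                    if (!rq)
                            break;

                    /* The request may be freed and recycled at any time, so
                     * only take a reference if the refcount is still live... */
                    if (!kref_get_unless_zero(&rq->fence.refcount)) {
                            rq = NULL;
                            break;
                    }

                    /* ...and then confirm the tracker still points at the
                     * request we pinned; if not, it was recycled under us. */
                    if (rq == rcu_access_pointer(active->request))
                            break;

                    i915_gem_request_put(rq);
            } while (1);
            rcu_read_unlock();

            return rq;
    }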
Users of this interface expect objects to eventually - * become non-busy without any further actions, therefore emit any - * necessary flushes here. - */ - ret = i915_gem_object_flush_active(obj); - if (ret) - goto unref; + /* Yes, the lookups are intentionally racy. + * + * First, we cannot simply rely on __I915_BO_ACTIVE. We have + * to regard the value as stale and as our ABI guarantees + * forward progress, we confirm the status of each active + * request with the hardware. + * + * Even though we guard the pointer lookup by RCU, that only + * guarantees that the pointer and its contents remain + * dereferencable and does *not* mean that the request we + * have is the same as the one being tracked by the object. + * + * Consider that we lookup the request just as it is being + * retired and freed. We take a local copy of the pointer, + * but before we add its engine into the busy set, the other + * thread reallocates it and assigns it to a task on another + * engine with a fresh and incomplete seqno. Guarding against + * that requires careful serialisation and reference counting, + * i.e. using __i915_gem_active_get_request_rcu(). We don't, + * instead we expect that if the result is busy, which engines + * are busy is not completely reliable - we only guarantee + * that the object was busy. + */ + rcu_read_lock(); - args->busy = 0; - if (obj->active) { - int i; + for_each_active(active, idx) + args->busy |= busy_check_reader(&obj->last_read[idx]); - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct drm_i915_gem_request *req; + /* For ABI sanity, we only care that the write engine is in + * the set of read engines. This should be ensured by the + * ordering of setting last_read/last_write in + * i915_vma_move_to_active(), and then in reverse in retire. + * However, for good measure, we always report the last_write + * request as a busy read as well as being a busy write. + * + * We don't care that the set of active read/write engines + * may change during construction of the result, as it is + * equally liable to change before userspace can inspect + * the result. 
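For reference, the encoding set up by __busy_read_flag()/__busy_write_id() earlier in this hunk leaves the last writer's exec_id in the low 16 bits of args->busy and one read flag per engine in the upper bits. An illustrative userspace-side decode, not part of the patch:

    #include <stdint.h>

    static inline unsigned int busy_last_writer(uint32_t busy)
    {
            return busy & 0xffff;   /* exec_id of the engine that last wrote */
    }

    static inline uint32_t busy_reader_mask(uint32_t busy)
    {
            return busy >> 16;      /* one bit per exec_id with an active read */
    }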
+ */ + args->busy |= busy_check_writer(&obj->last_write); - req = obj->last_read_req[i]; - if (req) - args->busy |= 1 << (16 + req->engine->exec_id); - } - if (obj->last_write_req) - args->busy |= obj->last_write_req->engine->exec_id; + rcu_read_unlock(); } -unref: - drm_gem_object_unreference(&obj->base); -unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + i915_gem_object_put_unlocked(obj); + return 0; } int @@ -4710,19 +4021,14 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle)); - if (&obj->base == NULL) { + obj = i915_gem_object_lookup(file_priv, args->handle); + if (!obj) { ret = -ENOENT; goto unlock; } - if (i915_gem_obj_is_pinned(obj)) { - ret = -EINVAL; - goto out; - } - if (obj->pages && - obj->tiling_mode != I915_TILING_NONE && + i915_gem_object_is_tiled(obj) && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { if (obj->madv == I915_MADV_WILLNEED) i915_gem_object_unpin_pages(obj); @@ -4739,8 +4045,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, args->retained = obj->madv != __I915_MADV_PURGED; -out: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4753,14 +4058,17 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->global_list); for (i = 0; i < I915_NUM_ENGINES; i++) - INIT_LIST_HEAD(&obj->engine_list[i]); + init_request_active(&obj->last_read[i], + i915_gem_object_retire__read); + init_request_active(&obj->last_write, + i915_gem_object_retire__write); INIT_LIST_HEAD(&obj->obj_exec_link); INIT_LIST_HEAD(&obj->vma_list); INIT_LIST_HEAD(&obj->batch_pool_link); obj->ops = ops; - obj->fence_reg = I915_FENCE_REG_NONE; + obj->frontbuffer_ggtt_origin = ORIGIN_GTT; obj->madv = I915_MADV_WILLNEED; i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); @@ -4865,33 +4173,31 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); + /* All file-owned VMA should have been released by this point through + * i915_gem_close_object(), or earlier by i915_gem_context_close(). + * However, the object may also be bound into the global GTT (e.g. + * older GPUs without per-process support, or for direct access through + * the GTT either for the user or for scanout). Those VMA still need to + * unbound now. + */ list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { - int ret; - - vma->pin_count = 0; - ret = i915_vma_unbind(vma); - if (WARN_ON(ret == -ERESTARTSYS)) { - bool was_interruptible; - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - WARN_ON(i915_vma_unbind(vma)); - - dev_priv->mm.interruptible = was_interruptible; - } + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(i915_vma_is_active(vma)); + vma->flags &= ~I915_VMA_PIN_MASK; + i915_vma_close(vma); } + GEM_BUG_ON(obj->bind_count); /* Stolen objects don't hold a ref, but do hold pin count. Fix that up * before progressing. 
*/ if (obj->stolen) i915_gem_object_unpin_pages(obj); - WARN_ON(obj->frontbuffer_bits); + WARN_ON(atomic_read(&obj->frontbuffer_bits)); if (obj->pages && obj->madv == I915_MADV_WILLNEED && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES && - obj->tiling_mode != I915_TILING_NONE) + i915_gem_object_is_tiled(obj)) i915_gem_object_unpin_pages(obj); if (WARN_ON(obj->pages_pin_count)) @@ -4899,7 +4205,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) if (discard_backing_storage(obj)) obj->madv = I915_MADV_DONTNEED; i915_gem_object_put_pages(obj); - i915_gem_object_free_mmap_offset(obj); BUG_ON(obj->pages); @@ -4918,71 +4223,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) intel_runtime_pm_put(dev_priv); } -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && - vma->vm == vm) - return vma; - } - return NULL; -} - -struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - GEM_BUG_ON(!view); - - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) - return vma; - return NULL; -} - -void i915_gem_vma_destroy(struct i915_vma *vma) -{ - WARN_ON(vma->node.allocated); - - /* Keep the vma as a placeholder in the execbuffer reservation lists */ - if (!list_empty(&vma->exec_list)) - return; - - if (!vma->is_ggtt) - i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - - list_del(&vma->obj_link); - - kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); -} - -static void -i915_gem_stop_engines(struct drm_device *dev) +int i915_gem_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine; - - for_each_engine(engine, dev_priv) - dev_priv->gt.stop_engine(engine); -} + int ret; -int -i915_gem_suspend(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int ret = 0; + intel_suspend_gt_powersave(dev_priv); mutex_lock(&dev->struct_mutex); - ret = i915_gem_wait_for_idle(dev_priv); + + /* We have to flush all the executing contexts to main memory so + * that they can saved in the hibernation image. To ensure the last + * context image is coherent, we have to switch away from it. That + * leaves the dev_priv->kernel_context still active when + * we actually suspend, and its image in memory may not match the GPU + * state. Fortunately, the kernel_context is disposable and we do + * not rely on its state. + */ + ret = i915_gem_switch_to_kernel_context(dev_priv); + if (ret) + goto err; + + ret = i915_gem_wait_for_idle(dev_priv, true); if (ret) goto err; i915_gem_retire_requests(dev_priv); - i915_gem_stop_engines(dev); i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -5002,6 +4269,23 @@ err: return ret; } +void i915_gem_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + + /* As we didn't flush the kernel context before suspend, we cannot + * guarantee that the context image is complete. So let's just reset + * it and start again. 
+ */ + if (i915.enable_execlists) + intel_lr_context_reset(dev_priv, dev_priv->kernel_context); + + mutex_unlock(&dev->struct_mutex); +} + void i915_gem_init_swizzling(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -5054,53 +4338,6 @@ static void init_unused_rings(struct drm_device *dev) } } -int i915_gem_init_engines(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - ret = intel_init_render_ring_buffer(dev); - if (ret) - return ret; - - if (HAS_BSD(dev)) { - ret = intel_init_bsd_ring_buffer(dev); - if (ret) - goto cleanup_render_ring; - } - - if (HAS_BLT(dev)) { - ret = intel_init_blt_ring_buffer(dev); - if (ret) - goto cleanup_bsd_ring; - } - - if (HAS_VEBOX(dev)) { - ret = intel_init_vebox_ring_buffer(dev); - if (ret) - goto cleanup_blt_ring; - } - - if (HAS_BSD2(dev)) { - ret = intel_init_bsd2_ring_buffer(dev); - if (ret) - goto cleanup_vebox_ring; - } - - return 0; - -cleanup_vebox_ring: - intel_cleanup_engine(&dev_priv->engine[VECS]); -cleanup_blt_ring: - intel_cleanup_engine(&dev_priv->engine[BCS]); -cleanup_bsd_ring: - intel_cleanup_engine(&dev_priv->engine[VCS]); -cleanup_render_ring: - intel_cleanup_engine(&dev_priv->engine[RCS]); - - return ret; -} - int i915_gem_init_hw(struct drm_device *dev) { @@ -5167,6 +4404,27 @@ out: return ret; } +bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) +{ + if (INTEL_INFO(dev_priv)->gen < 6) + return false; + + /* TODO: make semaphores and Execlists play nicely together */ + if (i915.enable_execlists) + return false; + + if (value >= 0) + return value; + +#ifdef CONFIG_INTEL_IOMMU + /* Enable semaphores on SNB when IO remapping is off */ + if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped) + return false; +#endif + + return true; +} + int i915_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -5175,15 +4433,9 @@ int i915_gem_init(struct drm_device *dev) mutex_lock(&dev->struct_mutex); if (!i915.enable_execlists) { - dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; - dev_priv->gt.init_engines = i915_gem_init_engines; - dev_priv->gt.cleanup_engine = intel_cleanup_engine; - dev_priv->gt.stop_engine = intel_stop_engine; + dev_priv->gt.cleanup_engine = intel_engine_cleanup; } else { - dev_priv->gt.execbuf_submit = intel_execlists_submission; - dev_priv->gt.init_engines = intel_logical_rings_init; dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; - dev_priv->gt.stop_engine = intel_logical_ring_stop; } /* This is just a security blanket to placate dragons. @@ -5195,19 +4447,22 @@ int i915_gem_init(struct drm_device *dev) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); i915_gem_init_userptr(dev_priv); - i915_gem_init_ggtt(dev); + + ret = i915_gem_init_ggtt(dev_priv); + if (ret) + goto out_unlock; ret = i915_gem_context_init(dev); if (ret) goto out_unlock; - ret = dev_priv->gt.init_engines(dev); + ret = intel_engines_init(dev); if (ret) goto out_unlock; ret = i915_gem_init_hw(dev); if (ret == -EIO) { - /* Allow ring initialisation to fail by marking the GPU as + /* Allow engine initialisation to fail by marking the GPU as * wedged. But we only want to do this where the GPU is angry, * for all other failure, such as an allocation failure, bail. 
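intel_sanitize_semaphores() above collapses the tri-state i915.semaphores module parameter (-1 meaning auto) into a definite yes/no. A plausible call site would normalise the parameter once during early driver initialisation; the exact location is an assumption, not shown in this hunk:

    i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);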
*/ @@ -5236,7 +4491,6 @@ i915_gem_cleanup_engines(struct drm_device *dev) static void init_engine_lists(struct intel_engine_cs *engine) { - INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); } @@ -5244,6 +4498,7 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; + int i; if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) @@ -5259,6 +4514,13 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) I915_READ(vgtif_reg(avail_rs.fence_num)); /* Initialize fence registers to zero */ + for (i = 0; i < dev_priv->num_fence_regs; i++) { + struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; + + fence->i915 = dev_priv; + fence->id = i; + list_add_tail(&fence->link, &dev_priv->mm.fence_list); + } i915_gem_restore_fences(dev); i915_gem_detect_bit_6_swizzle(dev); @@ -5283,18 +4545,17 @@ i915_gem_load_init(struct drm_device *dev) dev_priv->requests = kmem_cache_create("i915_gem_request", sizeof(struct drm_i915_gem_request), 0, - SLAB_HWCACHE_ALIGN, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_DESTROY_BY_RCU, NULL); - INIT_LIST_HEAD(&dev_priv->vm_list); INIT_LIST_HEAD(&dev_priv->context_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); for (i = 0; i < I915_NUM_ENGINES; i++) init_engine_lists(&dev_priv->engine[i]); - for (i = 0; i < I915_MAX_NUM_FENCES; i++) - INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->gt.retire_work, i915_gem_retire_work_handler); INIT_DELAYED_WORK(&dev_priv->gt.idle_work, @@ -5304,13 +4565,11 @@ i915_gem_load_init(struct drm_device *dev) dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - INIT_LIST_HEAD(&dev_priv->mm.fence_list); - init_waitqueue_head(&dev_priv->pending_flip_queue); dev_priv->mm.interruptible = true; - mutex_init(&dev_priv->fb_tracking.lock); + spin_lock_init(&dev_priv->fb_tracking.lock); } void i915_gem_load_cleanup(struct drm_device *dev) @@ -5320,6 +4579,9 @@ void i915_gem_load_cleanup(struct drm_device *dev) kmem_cache_destroy(dev_priv->requests); kmem_cache_destroy(dev_priv->vmas); kmem_cache_destroy(dev_priv->objects); + + /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ + rcu_barrier(); } int i915_gem_freeze_late(struct drm_i915_private *dev_priv) @@ -5353,21 +4615,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv) void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_request *request; /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. 
*/ spin_lock(&file_priv->mm.lock); - while (!list_empty(&file_priv->mm.request_list)) { - struct drm_i915_gem_request *request; - - request = list_first_entry(&file_priv->mm.request_list, - struct drm_i915_gem_request, - client_list); - list_del(&request->client_list); + list_for_each_entry(request, &file_priv->mm.request_list, client_list) request->file_priv = NULL; - } spin_unlock(&file_priv->mm.lock); if (!list_empty(&file_priv->rps.link)) { @@ -5396,7 +4652,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file) spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); - file_priv->bsd_ring = -1; + file_priv->bsd_engine = -1; ret = i915_gem_context_open(dev, file); if (ret) @@ -5418,118 +4674,24 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, struct drm_i915_gem_object *new, unsigned frontbuffer_bits) { + /* Control of individual bits within the mask are guarded by + * the owning plane->mutex, i.e. we can never see concurrent + * manipulation of individual bits. But since the bitfield as a whole + * is updated using RMW, we need to use atomics in order to update + * the bits. + */ + BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > + sizeof(atomic_t) * BITS_PER_BYTE); + if (old) { - WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex)); - WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits)); - old->frontbuffer_bits &= ~frontbuffer_bits; + WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); + atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); } if (new) { - WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex)); - WARN_ON(new->frontbuffer_bits & frontbuffer_bits); - new->frontbuffer_bits |= frontbuffer_bits; - } -} - -/* All the new VM stuff */ -u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_vma *vma; - - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (vma->is_ggtt && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) - return vma->node.start; + WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); + atomic_or(frontbuffer_bits, &new->frontbuffer_bits); } - - WARN(1, "%s vma for this object not found.\n", - i915_is_ggtt(vm) ? "global" : "ppgtt"); - return -1; -} - -u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) - return vma->node.start; - - WARN(1, "global vma for this object not found. 
(view=%u)\n", view->type); - return -1; -} - -bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (vma->is_ggtt && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) - return true; - } - - return false; -} - -bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, - const struct i915_ggtt_view *view) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->is_ggtt && - i915_ggtt_view_equal(&vma->ggtt_view, view) && - drm_mm_node_allocated(&vma->node)) - return true; - - return false; -} - -bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) -{ - struct i915_vma *vma; - - list_for_each_entry(vma, &o->vma_list, obj_link) - if (drm_mm_node_allocated(&vma->node)) - return true; - - return false; -} - -unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) -{ - struct i915_vma *vma; - - GEM_BUG_ON(list_empty(&o->vma_list)); - - list_for_each_entry(vma, &o->vma_list, obj_link) { - if (vma->is_ggtt && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - return vma->node.size; - } - - return 0; -} - -bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) -{ - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->pin_count > 0) - return true; - - return false; } /* Like i915_gem_object_get_page(), but mark the returned page dirty */ @@ -5584,6 +4746,6 @@ i915_gem_object_create_from_data(struct drm_device *dev, return obj; fail: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 3752d5daa4b2..ed989596d9a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -41,15 +41,15 @@ /** * i915_gem_batch_pool_init() - initialize a batch buffer pool - * @dev: the drm device + * @engine: the associated request submission engine * @pool: the batch buffer pool */ -void i915_gem_batch_pool_init(struct drm_device *dev, +void i915_gem_batch_pool_init(struct intel_engine_cs *engine, struct i915_gem_batch_pool *pool) { int n; - pool->dev = dev; + pool->engine = engine; for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) INIT_LIST_HEAD(&pool->cache_list[n]); @@ -65,18 +65,17 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) { int n; - WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); + lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { - while (!list_empty(&pool->cache_list[n])) { - struct drm_i915_gem_object *obj = - list_first_entry(&pool->cache_list[n], - struct drm_i915_gem_object, - batch_pool_link); - - list_del(&obj->batch_pool_link); - drm_gem_object_unreference(&obj->base); - } + struct drm_i915_gem_object *obj, *next; + + list_for_each_entry_safe(obj, next, + &pool->cache_list[n], + batch_pool_link) + i915_gem_object_put(obj); + + INIT_LIST_HEAD(&pool->cache_list[n]); } } @@ -102,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, struct list_head *list; int n; - WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); + lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); /* Compute a power-of-two bucket, but throw everything greater than * 16KiB into the same bucket: i.e. 
the the buckets hold objects of @@ -115,13 +114,14 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, list_for_each_entry_safe(tmp, next, list, batch_pool_link) { /* The batches are strictly LRU ordered */ - if (tmp->active) + if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id], + &tmp->base.dev->struct_mutex)) break; /* While we're looping, do some clean up */ if (tmp->madv == __I915_MADV_PURGED) { list_del(&tmp->batch_pool_link); - drm_gem_object_unreference(&tmp->base); + i915_gem_object_put(tmp); continue; } @@ -134,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, if (obj == NULL) { int ret; - obj = i915_gem_object_create(pool->dev, size); + obj = i915_gem_object_create(&pool->engine->i915->drm, size); if (IS_ERR(obj)) return obj; diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h index 848e90703eed..10d5ac4c00d3 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h @@ -27,13 +27,15 @@ #include "i915_drv.h" +struct intel_engine_cs; + struct i915_gem_batch_pool { - struct drm_device *dev; + struct intel_engine_cs *engine; struct list_head cache_list[4]; }; /* i915_gem_batch_pool.c */ -void i915_gem_batch_pool_init(struct drm_device *dev, +void i915_gem_batch_pool_init(struct intel_engine_cs *engine, struct i915_gem_batch_pool *pool); void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); struct drm_i915_gem_object* diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 3c97f0e7a003..35950ee46a1d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -134,21 +134,6 @@ static int get_context_size(struct drm_i915_private *dev_priv) return ret; } -static void i915_gem_context_clean(struct i915_gem_context *ctx) -{ - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; - struct i915_vma *vma, *next; - - if (!ppgtt) - return; - - list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, - vm_link) { - if (WARN_ON(__i915_vma_unbind_no_wait(vma))) - break; - } -} - void i915_gem_context_free(struct kref *ctx_ref) { struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); @@ -156,13 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref) lockdep_assert_held(&ctx->i915->drm.struct_mutex); trace_i915_context_free(ctx); - - /* - * This context is going away and we need to remove all VMAs still - * around. This is to handle imported shared objects for which - * destructor did not run when their handles were closed. 
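The batch-pool hunk above sorts objects into pool->cache_list[4] by size, with everything above 16KiB sharing the last bucket. A minimal user-space sketch of that bucketing rule; size_to_bucket, NUM_BUCKETS and the 4KiB page size are assumptions for illustration, not the kernel helper:

#include <stdio.h>

#define PAGE_SIZE   4096ul          /* assumed page size for the sketch */
#define NUM_BUCKETS 4               /* mirrors cache_list[4] */

/* Map an object size to one of four power-of-two buckets, with a final
 * catch-all bucket for everything larger than 16KiB. */
static int size_to_bucket(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int n = 0;

	while (n < NUM_BUCKETS - 1 && pages > (1ul << n))
		n++;

	return n;
}

int main(void)
{
	printf("4K:%d 8K:%d 16K:%d 1M:%d\n",
	       size_to_bucket(4096), size_to_bucket(8192),
	       size_to_bucket(16384), size_to_bucket(1ul << 20));
	return 0;	/* prints 4K:0 8K:1 16K:2 1M:3 */
}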
- */ - i915_gem_context_clean(ctx); + GEM_BUG_ON(!ctx->closed); i915_ppgtt_put(ctx->ppgtt); @@ -173,12 +152,13 @@ void i915_gem_context_free(struct kref *ctx_ref) continue; WARN_ON(ce->pin_count); - if (ce->ringbuf) - intel_ringbuffer_free(ce->ringbuf); + if (ce->ring) + intel_ring_free(ce->ring); - drm_gem_object_unreference(&ce->state->base); + i915_vma_put(ce->state); } + put_pid(ctx->pid); list_del(&ctx->link); ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); @@ -216,7 +196,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); /* Failure shouldn't ever happen this early */ if (WARN_ON(ret)) { - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); return ERR_PTR(ret); } } @@ -224,6 +204,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) return obj; } +static void i915_ppgtt_close(struct i915_address_space *vm) +{ + struct list_head *phases[] = { + &vm->active_list, + &vm->inactive_list, + &vm->unbound_list, + NULL, + }, **phase; + + GEM_BUG_ON(vm->closed); + vm->closed = true; + + for (phase = phases; *phase; phase++) { + struct i915_vma *vma, *vn; + + list_for_each_entry_safe(vma, vn, *phase, vm_link) + if (!i915_vma_is_closed(vma)) + i915_vma_close(vma); + } +} + +static void context_close(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(ctx->closed); + ctx->closed = true; + if (ctx->ppgtt) + i915_ppgtt_close(&ctx->ppgtt->base); + ctx->file_priv = ERR_PTR(-EBADF); + i915_gem_context_put(ctx); +} + static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) { int ret; @@ -271,13 +282,24 @@ __create_hw_context(struct drm_device *dev, ctx->ggtt_alignment = get_context_alignment(dev_priv); if (dev_priv->hw_context_size) { - struct drm_i915_gem_object *obj = - i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_alloc_context_obj(dev, + dev_priv->hw_context_size); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto err_out; } - ctx->engine[RCS].state = obj; + + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + ret = PTR_ERR(vma); + goto err_out; + } + + ctx->engine[RCS].state = vma; } /* Default context will never have a file_priv */ @@ -290,6 +312,9 @@ __create_hw_context(struct drm_device *dev, ret = DEFAULT_CONTEXT_HANDLE; ctx->file_priv = file_priv; + if (file_priv) + ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->user_handle = ret; /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. 
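i915_ppgtt_close() in the hunk above walks the address space's active, inactive and unbound lists through a NULL-terminated array of list heads. A stand-alone sketch of that "phases" idiom, using plain string arrays instead of list_head; all names here are illustrative only:

#include <stdio.h>

static void close_vma(const char *name)
{
	printf("closing %s\n", name);	/* stand-in for i915_vma_close() */
}

int main(void)
{
	const char *active[]   = { "vma-a0", "vma-a1", NULL };
	const char *inactive[] = { "vma-i0", NULL };
	const char *unbound[]  = { NULL };
	/* NULL-terminated array of phases, walked with a double pointer */
	const char **phases[] = { active, inactive, unbound, NULL }, ***phase;

	for (phase = phases; *phase; phase++)
		for (const char **vma = *phase; *vma; vma++)
			close_vma(*vma);

	return 0;
}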
If there @@ -305,7 +330,7 @@ __create_hw_context(struct drm_device *dev, return ctx; err_out: - i915_gem_context_unreference(ctx); + context_close(ctx); return ERR_PTR(ret); } @@ -327,13 +352,14 @@ i915_gem_create_context(struct drm_device *dev, return ctx; if (USES_FULL_PPGTT(dev)) { - struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); + struct i915_hw_ppgtt *ppgtt = + i915_ppgtt_create(to_i915(dev), file_priv); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); idr_remove(&file_priv->context_idr, ctx->user_handle); - i915_gem_context_unreference(ctx); + context_close(ctx); return ERR_CAST(ppgtt); } @@ -388,9 +414,9 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx, struct intel_context *ce = &ctx->engine[engine->id]; if (ce->state) - i915_gem_object_ggtt_unpin(ce->state); + i915_vma_unpin(ce->state); - i915_gem_context_unreference(ctx); + i915_gem_context_put(ctx); } } @@ -504,7 +530,7 @@ void i915_gem_context_fini(struct drm_device *dev) lockdep_assert_held(&dev->struct_mutex); - i915_gem_context_unreference(dctx); + context_close(dctx); dev_priv->kernel_context = NULL; ida_destroy(&dev_priv->context_hw_ida); @@ -514,8 +540,7 @@ static int context_idr_cleanup(int id, void *p, void *data) { struct i915_gem_context *ctx = p; - ctx->file_priv = ERR_PTR(-EBADF); - i915_gem_context_unreference(ctx); + context_close(ctx); return 0; } @@ -552,12 +577,13 @@ static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { struct drm_i915_private *dev_priv = req->i915; + struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(dev_priv) ? - hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : + i915.semaphores ? + INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len, ret; @@ -567,7 +593,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) * itlb_before_ctx_switch. 
*/ if (IS_GEN6(dev_priv)) { - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); + ret = engine->emit_flush(req, EMIT_INVALIDATE); if (ret) return ret; } @@ -589,64 +615,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ if (INTEL_GEN(dev_priv) >= 7) { - intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; - intel_ring_emit(engine, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; - intel_ring_emit_reg(engine, + intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); - intel_ring_emit(engine, + intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } } - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_SET_CONTEXT); - intel_ring_emit(engine, - i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | - flags); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_SET_CONTEXT); + intel_ring_emit(ring, + i915_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv */ - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ - intel_ring_emit(engine, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; last_reg = RING_PSMI_CTL(signaller->mmio_base); - intel_ring_emit_reg(engine, last_reg); - intel_ring_emit(engine, + intel_ring_emit_reg(ring, last_reg); + intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } /* Insert a delay before the next switch! */ - intel_ring_emit(engine, + intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(engine, last_reg); - intel_ring_emit(engine, engine->scratch.gtt_offset); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit_reg(ring, last_reg); + intel_ring_emit(ring, + i915_ggtt_offset(engine->scratch)); + intel_ring_emit(ring, MI_NOOP); } - intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE); + intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); } - intel_ring_advance(engine); + intel_ring_advance(ring); return ret; } @@ -654,7 +680,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) static int remap_l3(struct drm_i915_gem_request *req, int slice) { u32 *remap_info = req->i915->l3_parity.remap_info[slice]; - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int i, ret; if (!remap_info) @@ -669,13 +695,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice) * here because no other code should access these registers other than * at initialization time. 
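Both the PSMI workaround and remap_l3() above emit a MI_LOAD_REGISTER_IMM header followed by (register, value) pairs into the ring. A hedged sketch of that emission pattern using a plain u32 buffer; the opcode constants and emit helpers below are placeholders, not the real command encodings:

#include <stdint.h>
#include <stdio.h>

#define FAKE_LRI(n)	(0x1000u | (n))	/* placeholder header, not the HW encoding */
#define FAKE_NOOP	0x0000u		/* placeholder for MI_NOOP */

struct toy_ring {
	uint32_t cmd[64];
	unsigned int tail;
};

static void ring_emit(struct toy_ring *ring, uint32_t dw)
{
	ring->cmd[ring->tail++] = dw;
}

/* Emit one header followed by count (register, value) pairs, then pad with
 * a no-op, mirroring the loop structure of remap_l3(). */
static void emit_reg_writes(struct toy_ring *ring, const uint32_t *regs,
			    const uint32_t *values, unsigned int count)
{
	ring_emit(ring, FAKE_LRI(count));
	for (unsigned int i = 0; i < count; i++) {
		ring_emit(ring, regs[i]);
		ring_emit(ring, values[i]);
	}
	ring_emit(ring, FAKE_NOOP);
}

int main(void)
{
	struct toy_ring ring = { .tail = 0 };
	uint32_t regs[] = { 0xb010, 0xb014 };	/* illustrative offsets only */
	uint32_t values[] = { 0x1, 0x2 };

	emit_reg_writes(&ring, regs, values, 2);
	printf("emitted %u dwords\n", ring.tail);	/* 1 + 2*2 + 1 = 6 */
	return 0;
}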
*/ - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { - intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); - intel_ring_emit(engine, remap_info[i]); + intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i)); + intel_ring_emit(ring, remap_info[i]); } - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -744,6 +770,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) struct i915_gem_context *to = req->ctx; struct intel_engine_cs *engine = req->engine; struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; + struct i915_vma *vma = to->engine[RCS].state; struct i915_gem_context *from; u32 hw_flags; int ret, i; @@ -751,10 +778,15 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (skip_rcs_switch(ppgtt, engine, to)) return 0; + /* Clear this page out of any CPU caches for coherent swap-in/out. */ + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { + ret = i915_gem_object_set_to_gtt_domain(vma->obj, false); + if (ret) + return ret; + } + /* Trying to pin first makes error handling easier. */ - ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, - to->ggtt_alignment, - 0); + ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL); if (ret) return ret; @@ -767,18 +799,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) */ from = engine->last_context; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. Note - * that thanks to write = false in this call and us not setting any gpu - * write domains when putting a context object onto the active list - * (when switching away from it), this won't block. - * - * XXX: We need a real interface to do this instead of trickery. - */ - ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); - if (ret) - goto unpin_out; - if (needs_pd_load_pre(ppgtt, engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load @@ -787,7 +807,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) trace_switch_mm(engine, to); ret = ppgtt->switch_mm(ppgtt, req); if (ret) - goto unpin_out; + goto err; } if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) @@ -804,7 +824,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) if (to != from || (hw_flags & MI_FORCE_RESTORE)) { ret = mi_set_context(req, hw_flags); if (ret) - goto unpin_out; + goto err; } /* The backing object for the context is done after switching to the @@ -814,8 +834,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -823,14 +841,12 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. 
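do_rcs_switch() above pins the context state first precisely because it turns every later failure into a single "goto err" that unpins. A small sketch of that acquire-then-unwind idiom; the resource names and failure injection are made up for illustration:

#include <stdio.h>

static int pin_state(void)       { printf("pin\n");   return 0; }
static void unpin_state(void)    { printf("unpin\n"); }
static int switch_mm(int fail)   { return fail ? -5 : 0; }	/* -EIO stand-in */
static int set_context(int fail) { return fail ? -5 : 0; }

/* Acquire the long-lived resource first; every later failure funnels
 * through one label that releases it. */
static int do_switch(int fail_mm, int fail_ctx)
{
	int ret;

	ret = pin_state();
	if (ret)
		return ret;

	ret = switch_mm(fail_mm);
	if (ret)
		goto err;

	ret = set_context(fail_ctx);
	if (ret)
		goto err;

	return 0;

err:
	unpin_state();
	return ret;
}

int main(void)
{
	printf("ok=%d\n", do_switch(0, 0));
	printf("fail=%d\n", do_switch(0, 1));
	return 0;
}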
*/ - from->engine[RCS].state->dirty = 1; - - /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->engine[RCS].state); - i915_gem_context_unreference(from); + i915_vma_move_to_active(from->engine[RCS].state, req, 0); + /* state is kept alive until the next request */ + i915_vma_unpin(from->engine[RCS].state); + i915_gem_context_put(from); } - i915_gem_context_reference(to); - engine->last_context = to; + engine->last_context = i915_gem_context_get(to); /* GEN8 does *not* require an explicit reload if the PDPs have been * setup, and we do not wish to move them. @@ -872,8 +888,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) return 0; -unpin_out: - i915_gem_object_ggtt_unpin(to->engine[RCS].state); +err: + i915_vma_unpin(vma); return ret; } @@ -894,8 +910,9 @@ int i915_switch_context(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - WARN_ON(i915.enable_execlists); lockdep_assert_held(&req->i915->drm.struct_mutex); + if (i915.enable_execlists) + return 0; if (!req->ctx->engine[engine->id].state) { struct i915_gem_context *to = req->ctx; @@ -914,10 +931,9 @@ int i915_switch_context(struct drm_i915_gem_request *req) } if (to != engine->last_context) { - i915_gem_context_reference(to); if (engine->last_context) - i915_gem_context_unreference(engine->last_context); - engine->last_context = to; + i915_gem_context_put(engine->last_context); + engine->last_context = i915_gem_context_get(to); } return 0; @@ -926,6 +942,33 @@ int i915_switch_context(struct drm_i915_gem_request *req) return do_rcs_switch(req); } +int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + + for_each_engine(engine, dev_priv) { + struct drm_i915_gem_request *req; + int ret; + + if (engine->last_context == NULL) + continue; + + if (engine->last_context == dev_priv->kernel_context) + continue; + + req = i915_gem_request_alloc(engine, dev_priv->kernel_context); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = i915_switch_context(req); + i915_add_request_no_flush(req); + if (ret) + return ret; + } + + return 0; +} + static bool contexts_enabled(struct drm_device *dev) { return i915.enable_execlists || to_i915(dev)->hw_context_size; @@ -985,7 +1028,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, } idr_remove(&file_priv->context_idr, ctx->user_handle); - i915_gem_context_unreference(ctx); + context_close(ctx); mutex_unlock(&dev->struct_mutex); DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id); diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c deleted file mode 100644 index a56516482394..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright © 2008 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Keith Packard <keithp@keithp.com> - * - */ - -#include <drm/drmP.h> -#include <drm/i915_drm.h> -#include "i915_drv.h" - -#if WATCH_LISTS -int -i915_verify_lists(struct drm_device *dev) -{ - static int warned; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - int err = 0; - - if (warned) - return 0; - - for_each_engine(engine, dev_priv) { - list_for_each_entry(obj, &engine->active_list, - engine_list[engine->id]) { - if (obj->base.dev != dev || - !atomic_read(&obj->base.refcount.refcount)) { - DRM_ERROR("%s: freed active obj %p\n", - engine->name, obj); - err++; - break; - } else if (!obj->active || - obj->last_read_req[engine->id] == NULL) { - DRM_ERROR("%s: invalid active obj %p\n", - engine->name, obj); - err++; - } else if (obj->base.write_domain) { - DRM_ERROR("%s: invalid write obj %p (w %x)\n", - engine->name, - obj, obj->base.write_domain); - err++; - } - } - } - - return warned = err; -} -#endif /* WATCH_LIST */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 80bbe43a2e92..10265bb35604 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -23,9 +23,13 @@ * Authors: * Dave Airlie <airlied@redhat.com> */ + +#include <linux/dma-buf.h> +#include <linux/reservation.h> + #include <drm/drmP.h> + #include "i915_drv.h" -#include <linux/dma-buf.h> static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) { @@ -115,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) if (ret) return ERR_PTR(ret); - addr = i915_gem_object_pin_map(obj); + addr = i915_gem_object_pin_map(obj, I915_MAP_WB); mutex_unlock(&dev->struct_mutex); return addr; @@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .end_cpu_access = i915_gem_end_cpu_access, }; +static void export_fences(struct drm_i915_gem_object *obj, + struct dma_buf *dma_buf) +{ + struct reservation_object *resv = dma_buf->resv; + struct drm_i915_gem_request *req; + unsigned long active; + int idx; + + active = __I915_BO_ACTIVE(obj); + if (!active) + return; + + /* Serialise with execbuf to prevent concurrent fence-loops */ + mutex_lock(&obj->base.dev->struct_mutex); + + /* Mark the object for future fences before racily adding old fences */ + obj->base.dma_buf = dma_buf; + + ww_mutex_lock(&resv->lock, NULL); + + for_each_active(active, idx) { + req = i915_gem_active_get(&obj->last_read[idx], + &obj->base.dev->struct_mutex); + if (!req) + continue; + + if (reservation_object_reserve_shared(resv) == 0) + reservation_object_add_shared_fence(resv, &req->fence); + + i915_gem_request_put(req); + } + + req = i915_gem_active_get(&obj->last_write, + &obj->base.dev->struct_mutex); + if (req) { + reservation_object_add_excl_fence(resv, &req->fence); + i915_gem_request_put(req); + } + + ww_mutex_unlock(&resv->lock); + mutex_unlock(&obj->base.dev->struct_mutex); +} + struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct 
drm_gem_object *gem_obj, int flags) { struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct dma_buf *dma_buf; exp_info.ops = &i915_dmabuf_ops; exp_info.size = gem_obj->size; exp_info.flags = flags; exp_info.priv = gem_obj; - if (obj->ops->dmabuf_export) { int ret = obj->ops->dmabuf_export(obj); if (ret) return ERR_PTR(ret); } - return dma_buf_export(&exp_info); + dma_buf = dma_buf_export(&exp_info); + if (IS_ERR(dma_buf)) + return dma_buf; + + export_fences(obj, dma_buf); + return dma_buf; } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) @@ -278,8 +330,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, * Importing dmabuf exported from out own gem increases * refcount on gem itself instead of f_count of dmabuf. */ - drm_gem_object_reference(&obj->base); - return &obj->base; + return &i915_gem_object_get(obj)->base; } } @@ -300,6 +351,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); obj->base.import_attach = attach; + /* We use GTT as shorthand for a coherent domain, one that is + * neither in the GPU cache nor in the CPU cache, where all + * writes are immediately visible in memory. (That's not strictly + * true, but it's close! There are internal buffers such as the + * write-combined buffer or a delay through the chipset for GTT + * writes that do require us to treat GTT as a separate cache domain.) + */ + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->base.write_domain = 0; + return &obj->base; fail_detach: diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3c1280ec7ff6..815d5fbe07ac 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -33,53 +33,37 @@ #include "intel_drv.h" #include "i915_trace.h" -static int switch_to_pinned_context(struct drm_i915_private *dev_priv) +static bool +gpu_is_idle(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - if (i915.enable_execlists) - return 0; - for_each_engine(engine, dev_priv) { - struct drm_i915_gem_request *req; - int ret; - - if (engine->last_context == NULL) - continue; - - if (engine->last_context == dev_priv->kernel_context) - continue; - - req = i915_gem_request_alloc(engine, dev_priv->kernel_context); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = i915_switch_context(req); - i915_add_request_no_flush(req); - if (ret) - return ret; + if (intel_engine_is_active(engine)) + return false; } - return 0; + return true; } - static bool -mark_free(struct i915_vma *vma, struct list_head *unwind) +mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind) { - if (vma->pin_count) + if (i915_vma_is_pinned(vma)) return false; if (WARN_ON(!list_empty(&vma->exec_list))) return false; + if (flags & PIN_NONFAULT && vma->obj->fault_mappable) + return false; + list_add(&vma->exec_list, unwind); return drm_mm_scan_add_block(&vma->node); } /** * i915_gem_evict_something - Evict vmas to make room for binding a new one - * @dev: drm_device * @vm: address space to evict from * @min_size: size of the desired free space * @alignment: alignment constraint of the desired free space @@ -102,42 +86,37 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) * memory in e.g. the shrinker. 
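The export_fences() hunk above copies an object's outstanding requests into the dma-buf's reservation object: each engine still reading the object contributes a shared fence, and a pending write becomes the exclusive fence. A toy model of those semantics; the struct and limits below are invented for the sketch and are not the reservation_object API:

#include <stdio.h>

#define MAX_SHARED 4	/* arbitrary limit for the sketch */

struct toy_resv {
	int excl;			/* request id of the exclusive (write) fence, 0 = none */
	int shared[MAX_SHARED];		/* request ids of shared (read) fences */
	int shared_count;
};

static void add_shared_fence(struct toy_resv *resv, int req)
{
	if (resv->shared_count < MAX_SHARED)
		resv->shared[resv->shared_count++] = req;
}

static void add_excl_fence(struct toy_resv *resv, int req)
{
	/* A new exclusive fence supersedes the previous readers. */
	resv->excl = req;
	resv->shared_count = 0;
}

int main(void)
{
	struct toy_resv resv = { 0 };

	add_shared_fence(&resv, 101);	/* last_read on one engine */
	add_shared_fence(&resv, 102);	/* last_read on another engine */
	add_excl_fence(&resv, 103);	/* last_write */

	printf("excl=%d shared=%d\n", resv.excl, resv.shared_count);
	return 0;
}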
*/ int -i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, - int min_size, unsigned alignment, unsigned cache_level, - unsigned long start, unsigned long end, +i915_gem_evict_something(struct i915_address_space *vm, + u64 min_size, u64 alignment, + unsigned cache_level, + u64 start, u64 end, unsigned flags) { - struct list_head eviction_list, unwind_list; - struct i915_vma *vma; - int ret = 0; - int pass = 0; + struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct list_head eviction_list; + struct list_head *phases[] = { + &vm->inactive_list, + &vm->active_list, + NULL, + }, **phase; + struct i915_vma *vma, *next; + int ret; - trace_i915_gem_evict(dev, min_size, alignment, flags); + trace_i915_gem_evict(vm, min_size, alignment, flags); /* * The goal is to evict objects and amalgamate space in LRU order. * The oldest idle objects reside on the inactive list, which is in - * retirement order. The next objects to retire are those on the (per - * ring) active list that do not have an outstanding flush. Once the - * hardware reports completion (the seqno is updated after the - * batchbuffer has been finished) the clean buffer objects would - * be retired to the inactive list. Any dirty objects would be added - * to the tail of the flushing list. So after processing the clean - * active objects we need to emit a MI_FLUSH to retire the flushing - * list, hence the retirement order of the flushing list is in - * advance of the dirty objects on the active lists. + * retirement order. The next objects to retire are those in flight, + * on the active list, again in retirement order. * * The retirement sequence is thus: * 1. Inactive objects (already retired) - * 2. Clean active objects - * 3. Flushing list - * 4. Dirty active objects. + * 2. Active objects (will stall on unbinding) * * On each list, the oldest objects lie at the HEAD with the freshest * object on the TAIL. */ - - INIT_LIST_HEAD(&unwind_list); if (start != 0 || end != vm->total) { drm_mm_init_scan_with_range(&vm->mm, min_size, alignment, cache_level, @@ -145,96 +124,84 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); -search_again: - /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(vma, &vm->inactive_list, vm_link) { - if (mark_free(vma, &unwind_list)) - goto found; - } - if (flags & PIN_NONBLOCK) - goto none; + phases[1] = NULL; - /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(vma, &vm->active_list, vm_link) { - if (mark_free(vma, &unwind_list)) - goto found; - } +search_again: + INIT_LIST_HEAD(&eviction_list); + phase = phases; + do { + list_for_each_entry(vma, *phase, vm_link) + if (mark_free(vma, flags, &eviction_list)) + goto found; + } while (*++phase); -none: /* Nothing found, clean up and bail out! */ - while (!list_empty(&unwind_list)) { - vma = list_first_entry(&unwind_list, - struct i915_vma, - exec_list); + list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); - list_del_init(&vma->exec_list); + INIT_LIST_HEAD(&vma->exec_list); } /* Can we unpin some objects such as idle hw contents, - * or pending flips? + * or pending flips? But since only the GGTT has global entries + * such as scanouts, rinbuffers and contexts, we can skip the + * purge when inspecting per-process local address spaces. 
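The rewritten eviction loop above scans the inactive list first and only then the active list, collecting unpinned VMAs until the requested range can be satisfied, and unwinds the candidates if nothing fits. A simplified stand-alone model of that two-phase scan; sizes and names are invented, and the real code goes through drm_mm's scan API rather than summing sizes directly:

#include <stdbool.h>
#include <stdio.h>

struct toy_vma {
	const char *name;
	unsigned long size;
	bool pinned;
};

/* Walk the two phases in LRU order and report whether enough space could
 * be reclaimed; a pinned VMA is skipped, mirroring mark_free(). */
static bool scan_for_space(const struct toy_vma *phases[2], const int counts[2],
			   unsigned long min_size)
{
	unsigned long found = 0;

	for (int p = 0; p < 2; p++) {
		for (int i = 0; i < counts[p]; i++) {
			if (phases[p][i].pinned)
				continue;
			found += phases[p][i].size;
			printf("candidate %s (%lu)\n", phases[p][i].name,
			       phases[p][i].size);
			if (found >= min_size)
				return true;
		}
	}
	return false;	/* caller would unwind the candidate list here */
}

int main(void)
{
	struct toy_vma inactive[] = { { "old-batch", 4096, false } };
	struct toy_vma active[]   = { { "scanout", 8192, true },
				      { "recent-batch", 16384, false } };
	const struct toy_vma *phases[2] = { inactive, active };
	const int counts[2] = { 1, 2 };

	printf("fits: %d\n", scan_for_space(phases, counts, 12288));
	return 0;
}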
*/ - if (flags & PIN_NONBLOCK) + if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) return -ENOSPC; - /* Only idle the GPU and repeat the search once */ - if (pass++ == 0) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (i915_is_ggtt(vm)) { - ret = switch_to_pinned_context(dev_priv); - if (ret) - return ret; - } - - ret = i915_gem_wait_for_idle(dev_priv); - if (ret) - return ret; - - i915_gem_retire_requests(dev_priv); - goto search_again; + if (gpu_is_idle(dev_priv)) { + /* If we still have pending pageflip completions, drop + * back to userspace to give our workqueues time to + * acquire our locks and unpin the old scanouts. + */ + return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC; } - /* If we still have pending pageflip completions, drop - * back to userspace to give our workqueues time to - * acquire our locks and unpin the old scanouts. + /* Not everything in the GGTT is tracked via vma (otherwise we + * could evict as required with minimal stalling) so we are forced + * to idle the GPU and explicitly retire outstanding requests in + * the hopes that we can then remove contexts and the like only + * bound by their active reference. */ - return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC; + ret = i915_gem_switch_to_kernel_context(dev_priv); + if (ret) + return ret; + + ret = i915_gem_wait_for_idle(dev_priv, true); + if (ret) + return ret; + + i915_gem_retire_requests(dev_priv); + goto search_again; found: /* drm_mm doesn't allow any other other operations while - * scanning, therefore store to be evicted objects on a - * temporary list. */ - INIT_LIST_HEAD(&eviction_list); - while (!list_empty(&unwind_list)) { - vma = list_first_entry(&unwind_list, - struct i915_vma, - exec_list); - if (drm_mm_scan_remove_block(&vma->node)) { - list_move(&vma->exec_list, &eviction_list); - drm_gem_object_reference(&vma->obj->base); - continue; - } - list_del_init(&vma->exec_list); + * scanning, therefore store to-be-evicted objects on a + * temporary list and take a reference for all before + * calling unbind (which may remove the active reference + * of any of our objects, thus corrupting the list). 
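The comment above is the key to the new found: path: every scan hit is pinned before any unbind runs, because unbinding may drop the active reference that was keeping an entry alive while the local list is still being walked. A minimal sketch of that take-a-reference-then-operate pattern with a plain refcount; the types and helpers are illustrative:

#include <stdio.h>

struct toy_obj {
	const char *name;
	int refcount;
};

static void toy_get(struct toy_obj *obj) { obj->refcount++; }

static void toy_put(struct toy_obj *obj)
{
	if (--obj->refcount == 0)
		printf("freed %s\n", obj->name);
}

/* May drop the reference that the tracking list itself was holding. */
static void unbind(struct toy_obj *obj) { toy_put(obj); }

int main(void)
{
	struct toy_obj objs[] = { { "vma0", 1 }, { "vma1", 1 } };
	int n = 2;

	/* Pass 1: take an extra reference on every candidate first... */
	for (int i = 0; i < n; i++)
		toy_get(&objs[i]);

	/* Pass 2: ...so unbind cannot free an entry we still need to visit. */
	for (int i = 0; i < n; i++) {
		unbind(&objs[i]);
		toy_put(&objs[i]);	/* drop our temporary reference */
	}

	return 0;
}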
+ */ + list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { + if (drm_mm_scan_remove_block(&vma->node)) + __i915_vma_pin(vma); + else + list_del_init(&vma->exec_list); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - struct drm_gem_object *obj; vma = list_first_entry(&eviction_list, struct i915_vma, exec_list); - obj = &vma->obj->base; list_del_init(&vma->exec_list); + __i915_vma_unpin(vma); if (ret == 0) ret = i915_vma_unbind(vma); - - drm_gem_object_unreference(obj); } - return ret; } @@ -256,8 +223,8 @@ i915_gem_evict_for_vma(struct i915_vma *target) vma = container_of(node, typeof(*vma), node); - if (vma->pin_count) { - if (!vma->exec_entry || (vma->pin_count > 1)) + if (i915_vma_is_pinned(vma)) { + if (!vma->exec_entry || i915_vma_pin_count(vma) > 1) /* Object is pinned for some other use */ return -EBUSY; @@ -303,22 +270,21 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) struct drm_i915_private *dev_priv = to_i915(vm->dev); if (i915_is_ggtt(vm)) { - ret = switch_to_pinned_context(dev_priv); + ret = i915_gem_switch_to_kernel_context(dev_priv); if (ret) return ret; } - ret = i915_gem_wait_for_idle(dev_priv); + ret = i915_gem_wait_for_idle(dev_priv, true); if (ret) return ret; i915_gem_retire_requests(dev_priv); - WARN_ON(!list_empty(&vm->active_list)); } list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link) - if (vma->pin_count == 0) + if (!i915_vma_is_pinned(vma)) WARN_ON(i915_vma_unbind(vma)); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1978633e7549..601156c353cc 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -26,22 +26,42 @@ * */ +#include <linux/dma_remapping.h> +#include <linux/reservation.h> +#include <linux/uaccess.h> + #include <drm/drmP.h> #include <drm/i915_drm.h> + #include "i915_drv.h" +#include "i915_gem_dmabuf.h" #include "i915_trace.h" #include "intel_drv.h" -#include <linux/dma_remapping.h> -#include <linux/uaccess.h> +#include "intel_frontbuffer.h" + +#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */ -#define __EXEC_OBJECT_HAS_PIN (1<<31) -#define __EXEC_OBJECT_HAS_FENCE (1<<30) -#define __EXEC_OBJECT_NEEDS_MAP (1<<29) -#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) +#define __EXEC_OBJECT_HAS_PIN (1<<31) +#define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_MAP (1<<29) +#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) +#define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */ #define BATCH_OFFSET_BIAS (256*1024) +struct i915_execbuffer_params { + struct drm_device *dev; + struct drm_file *file; + struct i915_vma *batch; + u32 dispatch_flags; + u32 args_batch_start_offset; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + struct drm_i915_gem_request *request; +}; + struct eb_vmas { + struct drm_i915_private *i915; struct list_head vmas; int and; union { @@ -51,7 +71,8 @@ struct eb_vmas { }; static struct eb_vmas * -eb_create(struct drm_i915_gem_execbuffer2 *args) +eb_create(struct drm_i915_private *i915, + struct drm_i915_gem_execbuffer2 *args) { struct eb_vmas *eb = NULL; @@ -78,6 +99,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) } else eb->and = -args->buffer_count; + eb->i915 = i915; INIT_LIST_HEAD(&eb->vmas); return eb; } @@ -89,6 +111,26 @@ eb_reset(struct eb_vmas *eb) memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); } +static struct i915_vma * +eb_get_batch(struct eb_vmas 
*eb) +{ + struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0) + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return vma; +} + static int eb_lookup_vmas(struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, @@ -122,7 +164,7 @@ eb_lookup_vmas(struct eb_vmas *eb, goto err; } - drm_gem_object_reference(&obj->base); + i915_gem_object_get(obj); list_add_tail(&obj->obj_exec_link, &objects); } spin_unlock(&file->table_lock); @@ -143,8 +185,8 @@ eb_lookup_vmas(struct eb_vmas *eb, * from the (obj, vm) we don't run the risk of creating * duplicated vmas for the same vm. */ - vma = i915_gem_obj_lookup_or_create_vma(obj, vm); - if (IS_ERR(vma)) { + vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL); + if (unlikely(IS_ERR(vma))) { DRM_DEBUG("Failed to lookup VMA\n"); ret = PTR_ERR(vma); goto err; @@ -175,7 +217,7 @@ err: struct drm_i915_gem_object, obj_exec_link); list_del_init(&obj->obj_exec_link); - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); } /* * Objects already transfered to the vmas list will be unreferenced by @@ -208,7 +250,6 @@ static void i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry; - struct drm_i915_gem_object *obj = vma->obj; if (!drm_mm_node_allocated(&vma->node)) return; @@ -216,10 +257,10 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) entry = vma->exec_entry; if (entry->flags & __EXEC_OBJECT_HAS_FENCE) - i915_gem_object_unpin_fence(obj); + i915_vma_unpin_fence(vma); if (entry->flags & __EXEC_OBJECT_HAS_PIN) - vma->pin_count--; + __i915_vma_unpin(vma); entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); } @@ -234,13 +275,19 @@ static void eb_destroy(struct eb_vmas *eb) exec_list); list_del_init(&vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); - drm_gem_object_unreference(&vma->obj->base); + i915_vma_put(vma); } kfree(eb); } static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { + if (!i915_gem_object_has_struct_page(obj)) + return false; + + if (DBG_USE_CPU_RELOC) + return DBG_USE_CPU_RELOC > 0; + return (HAS_LLC(obj->base.dev) || obj->base.write_domain == I915_GEM_DOMAIN_CPU || obj->cache_level != I915_CACHE_NONE); @@ -265,144 +312,265 @@ static inline uint64_t gen8_noncanonical_addr(uint64_t address) } static inline uint64_t -relocation_target(struct drm_i915_gem_relocation_entry *reloc, +relocation_target(const struct drm_i915_gem_relocation_entry *reloc, uint64_t target_offset) { return gen8_canonical_addr((int)reloc->delta + target_offset); } -static int -relocate_entry_cpu(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +struct reloc_cache { + struct drm_i915_private *i915; + struct drm_mm_node node; + unsigned long vaddr; + unsigned int page; + bool use_64bit_reloc; +}; + +static void reloc_cache_init(struct reloc_cache *cache, + struct drm_i915_private *i915) { - struct drm_device *dev = obj->base.dev; - uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = relocation_target(reloc, target_offset); - 
char *vaddr; - int ret; + cache->page = -1; + cache->vaddr = 0; + cache->i915 = i915; + cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8; + cache->node.allocated = false; +} - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) - return ret; +static inline void *unmask_page(unsigned long p) +{ + return (void *)(uintptr_t)(p & PAGE_MASK); +} + +static inline unsigned int unmask_flags(unsigned long p) +{ + return p & ~PAGE_MASK; +} - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - reloc->offset >> PAGE_SHIFT)); - *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta); +#define KMAP 0x4 /* after CLFLUSH_FLAGS */ + +static void reloc_cache_fini(struct reloc_cache *cache) +{ + void *vaddr; + + if (!cache->vaddr) + return; - if (INTEL_INFO(dev)->gen >= 8) { - page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + vaddr = unmask_page(cache->vaddr); + if (cache->vaddr & KMAP) { + if (cache->vaddr & CLFLUSH_AFTER) + mb(); - if (page_offset == 0) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + kunmap_atomic(vaddr); + i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm); + } else { + wmb(); + io_mapping_unmap_atomic((void __iomem *)vaddr); + if (cache->node.allocated) { + struct i915_ggtt *ggtt = &cache->i915->ggtt; + + ggtt->base.clear_range(&ggtt->base, + cache->node.start, + cache->node.size, + true); + drm_mm_remove_node(&cache->node); + } else { + i915_vma_unpin((struct i915_vma *)cache->node.mm); } + } +} + +static void *reloc_kmap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) +{ + void *vaddr; + + if (cache->vaddr) { + kunmap_atomic(unmask_page(cache->vaddr)); + } else { + unsigned int flushes; + int ret; + + ret = i915_gem_obj_prepare_shmem_write(obj, &flushes); + if (ret) + return ERR_PTR(ret); + + BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); + BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); - *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta); + cache->vaddr = flushes | KMAP; + cache->node.mm = (void *)obj; + if (flushes) + mb(); } - kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page)); + cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; + cache->page = page; - return 0; + return vaddr; } -static int -relocate_entry_gtt(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +static void *reloc_iomap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - uint64_t delta = relocation_target(reloc, target_offset); - uint64_t offset; - void __iomem *reloc_page; - int ret; + struct i915_ggtt *ggtt = &cache->i915->ggtt; + unsigned long offset; + void *vaddr; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; + if (cache->node.allocated) { + wmb(); + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, page), + cache->node.start, I915_CACHE_NONE, 0); + cache->page = page; + return unmask_page(cache->vaddr); + } - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; + if (cache->vaddr) { + io_mapping_unmap_atomic(unmask_page(cache->vaddr)); + } else { + struct i915_vma *vma; + int ret; + + if (use_cpu_reloc(obj)) + return NULL; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return 
ERR_PTR(ret); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | PIN_NONBLOCK); + if (IS_ERR(vma)) { + memset(&cache->node, 0, sizeof(cache->node)); + ret = drm_mm_insert_node_in_range_generic + (&ggtt->base.mm, &cache->node, + 4096, 0, 0, + 0, ggtt->mappable_end, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + if (ret) + return ERR_PTR(ret); + } else { + ret = i915_vma_put_fence(vma); + if (ret) { + i915_vma_unpin(vma); + return ERR_PTR(ret); + } - /* Map the page containing the relocation we're going to perform. */ - offset = i915_gem_obj_ggtt_offset(obj); - offset += reloc->offset; - reloc_page = io_mapping_map_atomic_wc(ggtt->mappable, - offset & PAGE_MASK); - iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); - - if (INTEL_INFO(dev)->gen >= 8) { - offset += sizeof(uint32_t); - - if (offset_in_page(offset) == 0) { - io_mapping_unmap_atomic(reloc_page); - reloc_page = - io_mapping_map_atomic_wc(ggtt->mappable, - offset); + cache->node.start = vma->node.start; + cache->node.mm = (void *)vma; } + } - iowrite32(upper_32_bits(delta), - reloc_page + offset_in_page(offset)); + offset = cache->node.start; + if (cache->node.allocated) { + ggtt->base.insert_page(&ggtt->base, + i915_gem_object_get_dma_address(obj, page), + offset, I915_CACHE_NONE, 0); + } else { + offset += page << PAGE_SHIFT; } - io_mapping_unmap_atomic(reloc_page); + vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset); + cache->page = page; + cache->vaddr = (unsigned long)vaddr; - return 0; + return vaddr; } -static void -clflush_write32(void *addr, uint32_t value) +static void *reloc_vaddr(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + int page) { - /* This is not a fast path, so KISS. */ - drm_clflush_virt_range(addr, sizeof(uint32_t)); - *(uint32_t *)addr = value; - drm_clflush_virt_range(addr, sizeof(uint32_t)); + void *vaddr; + + if (cache->page == page) { + vaddr = unmask_page(cache->vaddr); + } else { + vaddr = NULL; + if ((cache->vaddr & KMAP) == 0) + vaddr = reloc_iomap(obj, cache, page); + if (!vaddr) + vaddr = reloc_kmap(obj, cache, page); + } + + return vaddr; } -static int -relocate_entry_clflush(struct drm_i915_gem_object *obj, - struct drm_i915_gem_relocation_entry *reloc, - uint64_t target_offset) +static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) { - struct drm_device *dev = obj->base.dev; - uint32_t page_offset = offset_in_page(reloc->offset); - uint64_t delta = relocation_target(reloc, target_offset); - char *vaddr; - int ret; + if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { + if (flushes & CLFLUSH_BEFORE) { + clflushopt(addr); + mb(); + } - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; + *addr = value; + + /* Writes to the same cacheline are serialised by the CPU + * (including clflush). On the write path, we only require + * that it hits memory in an orderly fashion and place + * mb barriers at the start and end of the relocation phase + * to ensure ordering of clflush wrt to the system. 
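The new reloc_cache above keeps the most recently mapped page around so back-to-back relocations into the same page do not remap it, and the relocation write that follows splits a 64-bit value into two 32-bit halves, re-resolving the mapping when the upper half crosses into the next page. A user-space sketch of that caching plus split write; the backing "pages" and helper names are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096u

struct toy_cache {
	uint8_t *vaddr;			/* currently "mapped" page */
	long page;			/* index of that page, -1 = none */
	uint8_t backing[4][TOY_PAGE_SIZE];
};

/* Return a pointer to the requested page, reusing the cached mapping
 * whenever the page has not changed (the whole point of reloc_cache). */
static uint8_t *cache_map(struct toy_cache *c, long page)
{
	if (c->page != page) {
		c->vaddr = c->backing[page];
		c->page = page;
		printf("remapped page %ld\n", page);
	}
	return c->vaddr;
}

/* Write a 64-bit relocation as two 32-bit halves, like the wide path in
 * relocate_entry(): the second half may land on a different page. */
static void write_reloc64(struct toy_cache *c, uint64_t offset, uint64_t value)
{
	for (int half = 0; half < 2; half++) {
		uint8_t *vaddr = cache_map(c, offset / TOY_PAGE_SIZE);
		uint32_t lo = (uint32_t)value;

		memcpy(vaddr + offset % TOY_PAGE_SIZE, &lo, sizeof(lo));
		offset += sizeof(uint32_t);
		value >>= 32;
	}
}

int main(void)
{
	struct toy_cache c = { .page = -1 };

	write_reloc64(&c, 16, 0x1122334455667788ull);		/* same page twice */
	write_reloc64(&c, TOY_PAGE_SIZE - 4, 0xdeadbeefcafef00dull); /* crosses a page */
	return 0;
}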
+ */ + if (flushes & CLFLUSH_AFTER) + clflushopt(addr); + } else + *addr = value; +} - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - reloc->offset >> PAGE_SHIFT)); - clflush_write32(vaddr + page_offset, lower_32_bits(delta)); +static int +relocate_entry(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_relocation_entry *reloc, + struct reloc_cache *cache, + u64 target_offset) +{ + u64 offset = reloc->offset; + bool wide = cache->use_64bit_reloc; + void *vaddr; + + target_offset = relocation_target(reloc, target_offset); +repeat: + vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + clflush_write32(vaddr + offset_in_page(offset), + lower_32_bits(target_offset), + cache->vaddr); + + if (wide) { + offset += sizeof(u32); + target_offset >>= 32; + wide = false; + goto repeat; + } - if (INTEL_INFO(dev)->gen >= 8) { - page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + return 0; +} - if (page_offset == 0) { - kunmap_atomic(vaddr); - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, - (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); - } +static bool object_is_idle(struct drm_i915_gem_object *obj) +{ + unsigned long active = i915_gem_object_get_active(obj); + int idx; - clflush_write32(vaddr + page_offset, upper_32_bits(delta)); + for_each_active(active, idx) { + if (!i915_gem_active_is_idle(&obj->last_read[idx], + &obj->base.dev->struct_mutex)) + return false; } - kunmap_atomic(vaddr); - - return 0; + return true; } static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, - struct drm_i915_gem_relocation_entry *reloc) + struct drm_i915_gem_relocation_entry *reloc, + struct reloc_cache *cache) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; @@ -465,7 +633,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, /* Check that the relocation address is valid... */ if (unlikely(reloc->offset > - obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) { + obj->base.size - (cache->use_64bit_reloc ? 
8 : 4))) { DRM_DEBUG("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, @@ -482,26 +650,15 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, } /* We can't wait for rendering with pagefaults disabled */ - if (obj->active && pagefault_disabled()) + if (pagefault_disabled() && !object_is_idle(obj)) return -EFAULT; - if (use_cpu_reloc(obj)) - ret = relocate_entry_cpu(obj, reloc, target_offset); - else if (obj->map_and_fenceable) - ret = relocate_entry_gtt(obj, reloc, target_offset); - else if (static_cpu_has(X86_FEATURE_CLFLUSH)) - ret = relocate_entry_clflush(obj, reloc, target_offset); - else { - WARN_ONCE(1, "Impossible case in relocation handling\n"); - ret = -ENODEV; - } - + ret = relocate_entry(obj, reloc, cache, target_offset); if (ret) return ret; /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; - return 0; } @@ -513,9 +670,11 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry __user *user_relocs; struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - int remain, ret; + struct reloc_cache cache; + int remain, ret = 0; user_relocs = u64_to_user_ptr(entry->relocs_ptr); + reloc_cache_init(&cache, eb->i915); remain = entry->relocation_count; while (remain) { @@ -525,19 +684,23 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, count = ARRAY_SIZE(stack_reloc); remain -= count; - if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) - return -EFAULT; + if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]))) { + ret = -EFAULT; + goto out; + } do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); if (ret) - return ret; + goto out; if (r->presumed_offset != offset && - __put_user(r->presumed_offset, &user_relocs->presumed_offset)) { - return -EFAULT; + __put_user(r->presumed_offset, + &user_relocs->presumed_offset)) { + ret = -EFAULT; + goto out; } user_relocs++; @@ -545,7 +708,9 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, } while (--count); } - return 0; +out: + reloc_cache_fini(&cache); + return ret; #undef N_RELOC } @@ -555,15 +720,18 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, struct drm_i915_gem_relocation_entry *relocs) { const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - int i, ret; + struct reloc_cache cache; + int i, ret = 0; + reloc_cache_init(&cache, eb->i915); for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); if (ret) - return ret; + break; } + reloc_cache_fini(&cache); - return 0; + return ret; } static int @@ -626,23 +794,27 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, flags |= PIN_HIGH; } - ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); - if ((ret == -ENOSPC || ret == -E2BIG) && + ret = i915_vma_pin(vma, + entry->pad_to_size, + entry->alignment, + flags); + if ((ret == -ENOSPC || ret == -E2BIG) && only_mappable_for_reloc(entry->flags)) - ret = i915_gem_object_pin(obj, vma->vm, - entry->alignment, - flags & ~PIN_MAPPABLE); + ret = i915_vma_pin(vma, + entry->pad_to_size, + entry->alignment, + flags & ~PIN_MAPPABLE); if (ret) return ret; entry->flags |= __EXEC_OBJECT_HAS_PIN; if (entry->flags & 
EXEC_OBJECT_NEEDS_FENCE) { - ret = i915_gem_object_get_fence(obj); + ret = i915_vma_get_fence(vma); if (ret) return ret; - if (i915_gem_object_pin_fence(obj)) + if (i915_vma_pin_fence(vma)) entry->flags |= __EXEC_OBJECT_HAS_FENCE; } @@ -667,7 +839,7 @@ need_reloc_mappable(struct i915_vma *vma) if (entry->relocation_count == 0) return false; - if (!vma->is_ggtt) + if (!i915_vma_is_ggtt(vma)) return false; /* See also use_cpu_reloc() */ @@ -684,14 +856,17 @@ static bool eb_vma_misplaced(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - struct drm_i915_gem_object *obj = vma->obj; - WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt); + WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && + !i915_vma_is_ggtt(vma)); if (entry->alignment && vma->node.start & (entry->alignment - 1)) return true; + if (vma->node.size < entry->pad_to_size) + return true; + if (entry->flags & EXEC_OBJECT_PINNED && vma->node.start != entry->offset) return true; @@ -701,7 +876,8 @@ eb_vma_misplaced(struct i915_vma *vma) return true; /* avoid costly ping-pong once a batch bo ended up non-mappable */ - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && + !i915_vma_is_map_and_fenceable(vma)) return !only_mappable_for_reloc(entry->flags); if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && @@ -725,8 +901,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; int retry; - i915_gem_retire_requests_ring(engine); - vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; INIT_LIST_HEAD(&ordered_vmas); @@ -746,7 +920,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; + i915_gem_object_is_tiled(obj); need_mappable = need_fence || need_reloc_mappable(vma); if (entry->flags & EXEC_OBJECT_PINNED) @@ -843,7 +1017,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); list_del_init(&vma->exec_list); i915_gem_execbuffer_unreserve_vma(vma); - drm_gem_object_unreference(&vma->obj->base); + i915_vma_put(vma); } mutex_unlock(&dev->struct_mutex); @@ -937,41 +1111,42 @@ err: return ret; } +static unsigned int eb_other_engines(struct drm_i915_gem_request *req) +{ + unsigned int mask; + + mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK; + mask <<= I915_BO_ACTIVE_SHIFT; + + return mask; +} + static int i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, struct list_head *vmas) { - const unsigned other_rings = ~intel_engine_flag(req->engine); + const unsigned int other_rings = eb_other_engines(req); struct i915_vma *vma; - uint32_t flush_domains = 0; - bool flush_chipset = false; int ret; list_for_each_entry(vma, vmas, exec_list) { struct drm_i915_gem_object *obj = vma->obj; - if (obj->active & other_rings) { - ret = i915_gem_object_sync(obj, req->engine, &req); + if (obj->flags & other_rings) { + ret = i915_gem_object_sync(obj, req); if (ret) return ret; } if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - flush_chipset |= i915_gem_clflush_object(obj, false); - - flush_domains |= obj->base.write_domain; + i915_gem_clflush_object(obj, false); } - if (flush_chipset) - i915_gem_chipset_flush(req->engine->i915); - - if (flush_domains & I915_GEM_DOMAIN_GTT) - wmb(); + /* Unconditionally flush any chipset caches 
(for streaming writes). */ + i915_gem_chipset_flush(req->engine->i915); - /* Unconditionally invalidate gpu caches and ensure that we do flush - * any residual writes from the previous batch. - */ - return intel_ring_invalidate_all_caches(req); + /* Unconditionally invalidate GPU caches and TLBs. */ + return req->engine->emit_flush(req, EMIT_INVALIDATE); } static bool @@ -1007,6 +1182,9 @@ validate_exec_list(struct drm_device *dev, unsigned invalid_flags; int i; + /* INTERNAL flags must not overlap with external ones */ + BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); + invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; if (USES_FULL_PPGTT(dev)) invalid_flags |= EXEC_OBJECT_NEEDS_GTT; @@ -1036,6 +1214,14 @@ validate_exec_list(struct drm_device *dev, if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) return -EINVAL; + /* pad_to_size was once a reserved field, so sanitize it */ + if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) { + if (offset_in_page(exec[i].pad_to_size)) + return -EINVAL; + } else { + exec[i].pad_to_size = 0; + } + /* First check for malicious input causing overflow in * the worst case where we need to allocate the entire * relocation tree as a single array. @@ -1086,66 +1272,99 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ctx; } -void +void i915_vma_move_to_active(struct i915_vma *vma, + struct drm_i915_gem_request *req, + unsigned int flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + const unsigned int idx = req->engine->id; + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + + obj->dirty = 1; /* be paranoid */ + + /* Add a reference if we're newly entering the active list. + * The order in which we add operations to the retirement queue is + * vital here: mark_active adds to the start of the callback list, + * such that subsequent callbacks are called first. Therefore we + * add the active reference first and queue for it to be dropped + * *last*. + */ + if (!i915_gem_object_is_active(obj)) + i915_gem_object_get(obj); + i915_gem_object_set_active(obj, idx); + i915_gem_active_set(&obj->last_read[idx], req); + + if (flags & EXEC_OBJECT_WRITE) { + i915_gem_active_set(&obj->last_write, req); + + intel_fb_obj_invalidate(obj, ORIGIN_CS); + + /* update for the implicit flush after a batch */ + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; + } + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_gem_active_set(&vma->last_fence, req); + + i915_vma_set_active(vma, idx); + i915_gem_active_set(&vma->last_read[idx], req); + list_move_tail(&vma->vm_link, &vma->vm->active_list); +} + +static void eb_export_fence(struct drm_i915_gem_object *obj, + struct drm_i915_gem_request *req, + unsigned int flags) +{ + struct reservation_object *resv; + + resv = i915_gem_object_get_dmabuf_resv(obj); + if (!resv) + return; + + /* Ignore errors from failing to allocate the new fence, we can't + * handle an error right now. Worst case should be missed + * synchronisation leading to rendering corruption. 
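The ordering argument in the i915_vma_move_to_active() comment above is worth spelling out: retirement callbacks are pushed onto the front of a list, so they run newest-first, and the reference taken first is therefore dropped last. A tiny sketch of that prepend-then-run-in-order behaviour; the list and callback types are invented:

#include <stdio.h>

struct toy_cb {
	const char *what;
	struct toy_cb *next;
};

/* mark_active-style: new callbacks go on the *front* of the list. */
static void add_callback(struct toy_cb **head, struct toy_cb *cb)
{
	cb->next = *head;
	*head = cb;
}

static void retire_all(struct toy_cb *head)
{
	for (; head; head = head->next)
		printf("retire: %s\n", head->what);
}

int main(void)
{
	struct toy_cb drop_ref = { "drop object reference", NULL };
	struct toy_cb clear_rq = { "clear last_read slot", NULL };
	struct toy_cb *head = NULL;

	/* Added first... */
	add_callback(&head, &drop_ref);
	/* ...added last, so it runs first on retirement. */
	add_callback(&head, &clear_rq);

	retire_all(head);	/* clears the slot, then drops the reference */
	return 0;
}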
+ */ + ww_mutex_lock(&resv->lock, NULL); + if (flags & EXEC_OBJECT_WRITE) + reservation_object_add_excl_fence(resv, &req->fence); + else if (reservation_object_reserve_shared(resv) == 0) + reservation_object_add_shared_fence(resv, &req->fence); + ww_mutex_unlock(&resv->lock); +} + +static void i915_gem_execbuffer_move_to_active(struct list_head *vmas, struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = i915_gem_request_get_engine(req); struct i915_vma *vma; list_for_each_entry(vma, vmas, exec_list) { - struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; struct drm_i915_gem_object *obj = vma->obj; u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; - obj->dirty = 1; /* be paranoid */ obj->base.write_domain = obj->base.pending_write_domain; - if (obj->base.write_domain == 0) + if (obj->base.write_domain) + vma->exec_entry->flags |= EXEC_OBJECT_WRITE; + else obj->base.pending_read_domains |= obj->base.read_domains; obj->base.read_domains = obj->base.pending_read_domains; - i915_vma_move_to_active(vma, req); - if (obj->base.write_domain) { - i915_gem_request_assign(&obj->last_write_req, req); - - intel_fb_obj_invalidate(obj, ORIGIN_CS); - - /* update for the implicit flush after a batch */ - obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; - } - if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { - i915_gem_request_assign(&obj->last_fenced_req, req); - if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = engine->i915; - list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, - &dev_priv->mm.fence_list); - } - } - + i915_vma_move_to_active(vma, req, vma->exec_entry->flags); + eb_export_fence(obj, req, vma->exec_entry->flags); trace_i915_gem_object_change_domain(obj, old_read, old_write); } } -static void -i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) -{ - /* Unconditionally force add_request to emit a full flush. 
*/ - params->engine->gpu_caches_dirty = true; - - /* Add a breadcrumb for the completion of the batch buffer */ - __i915_add_request(params->request, params->batch_obj, true); -} - static int -i915_reset_gen7_sol_offsets(struct drm_device *dev, - struct drm_i915_gem_request *req) +i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_ring *ring = req->ring; int ret, i; - if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { + if (!IS_GEN7(req->i915) || req->engine->id != RCS) { DRM_DEBUG("sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -1155,21 +1374,21 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return ret; for (i = 0; i < 4; i++) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i)); - intel_ring_emit(engine, 0); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i)); + intel_ring_emit(ring, 0); } - intel_ring_advance(engine); + intel_ring_advance(ring); return 0; } -static struct drm_i915_gem_object* +static struct i915_vma * i915_gem_execbuffer_parse(struct intel_engine_cs *engine, struct drm_i915_gem_exec_object2 *shadow_exec_entry, - struct eb_vmas *eb, struct drm_i915_gem_object *batch_obj, + struct eb_vmas *eb, u32 batch_start_offset, u32 batch_len, bool is_master) @@ -1181,51 +1400,44 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine, shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool, PAGE_ALIGN(batch_len)); if (IS_ERR(shadow_batch_obj)) - return shadow_batch_obj; - - ret = i915_parse_cmds(engine, - batch_obj, - shadow_batch_obj, - batch_start_offset, - batch_len, - is_master); - if (ret) - goto err; - - ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0); - if (ret) - goto err; + return ERR_CAST(shadow_batch_obj); + + ret = intel_engine_cmd_parser(engine, + batch_obj, + shadow_batch_obj, + batch_start_offset, + batch_len, + is_master); + if (ret) { + if (ret == -EACCES) /* unhandled chained batch */ + vma = NULL; + else + vma = ERR_PTR(ret); + goto out; + } - i915_gem_object_unpin_pages(shadow_batch_obj); + vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) + goto out; memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); - vma = i915_gem_obj_to_ggtt(shadow_batch_obj); vma->exec_entry = shadow_exec_entry; vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; - drm_gem_object_reference(&shadow_batch_obj->base); + i915_gem_object_get(shadow_batch_obj); list_add_tail(&vma->exec_list, &eb->vmas); - shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND; - - return shadow_batch_obj; - -err: +out: i915_gem_object_unpin_pages(shadow_batch_obj); - if (ret == -EACCES) /* unhandled chained batch */ - return batch_obj; - else - return ERR_PTR(ret); + return vma; } -int -i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas) +static int +execbuf_submit(struct i915_execbuffer_params *params, + struct drm_i915_gem_execbuffer2 *args, + struct list_head *vmas) { - struct drm_device *dev = params->dev; - struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = params->request->i915; u64 exec_start, exec_len; int instp_mode; u32 instp_mask; @@ -1239,34 +1451,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params 
*params, if (ret) return ret; - WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id), - "%s didn't clear reload\n", engine->name); - instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; instp_mask = I915_EXEC_CONSTANTS_MASK; switch (instp_mode) { case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { + if (instp_mode != 0 && params->engine->id != RCS) { DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); return -EINVAL; } if (instp_mode != dev_priv->relative_constants_mode) { - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_INFO(dev_priv)->gen < 4) { DRM_DEBUG("no rel constants on pre-gen4\n"); return -EINVAL; } - if (INTEL_INFO(dev)->gen > 5 && + if (INTEL_INFO(dev_priv)->gen > 5 && instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); return -EINVAL; } /* The HW changed the meaning on this bit on gen6 */ - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_INFO(dev_priv)->gen >= 6) instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; } break; @@ -1275,37 +1484,39 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, return -EINVAL; } - if (engine == &dev_priv->engine[RCS] && + if (params->engine->id == RCS && instp_mode != dev_priv->relative_constants_mode) { + struct intel_ring *ring = params->request->ring; + ret = intel_ring_begin(params->request, 4); if (ret) return ret; - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, INSTPM); - intel_ring_emit(engine, instp_mask << 16 | instp_mode); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, INSTPM); + intel_ring_emit(ring, instp_mask << 16 | instp_mode); + intel_ring_advance(ring); dev_priv->relative_constants_mode = instp_mode; } if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - ret = i915_reset_gen7_sol_offsets(dev, params->request); + ret = i915_reset_gen7_sol_offsets(params->request); if (ret) return ret; } exec_len = args->batch_len; - exec_start = params->batch_obj_vm_offset + + exec_start = params->batch->node.start + params->args_batch_start_offset; if (exec_len == 0) - exec_len = params->batch_obj->base.size; + exec_len = params->batch->size - params->args_batch_start_offset; - ret = engine->dispatch_execbuffer(params->request, - exec_start, exec_len, - params->dispatch_flags); + ret = params->engine->emit_bb_start(params->request, + exec_start, exec_len, + params->dispatch_flags); if (ret) return ret; @@ -1318,43 +1529,24 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, /** * Find one BSD ring to dispatch the corresponding BSD command. - * The ring index is returned. + * The engine index is returned. */ static unsigned int -gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file) +gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, + struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; /* Check whether the file_priv has already selected one ring. */ - if ((int)file_priv->bsd_ring < 0) { + if ((int)file_priv->bsd_engine < 0) { /* If not, use the ping-pong mechanism to select one. 
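/*
 * Illustrative model, not part of the patch: the ping-pong selection the
 * comment above refers to, implemented just below in
 * gen8_dispatch_bsd_engine().  The first BSD submission from a file latches
 * whichever of the two VCS engines is next in the global toggle (the driver
 * flips the toggle under struct_mutex); later submissions from the same
 * file keep using that engine.
 */
struct model_file {
	int bsd_engine;			/* -1 until a choice is made */
};

static int model_pick_bsd_engine(struct model_file *file, int *global_toggle)
{
	if (file->bsd_engine < 0) {
		file->bsd_engine = *global_toggle;	/* 0 or 1 */
		*global_toggle ^= 1;
	}
	return file->bsd_engine;
}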
*/ mutex_lock(&dev_priv->drm.struct_mutex); - file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; - dev_priv->mm.bsd_ring_dispatch_index ^= 1; + file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index; + dev_priv->mm.bsd_engine_dispatch_index ^= 1; mutex_unlock(&dev_priv->drm.struct_mutex); } - return file_priv->bsd_ring; -} - -static struct drm_i915_gem_object * -eb_get_batch(struct eb_vmas *eb) -{ - struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); - - /* - * SNA is doing fancy tricks with compressing batch buffers, which leads - * to negative relocation deltas. Usually that works out ok since the - * relocate address is still positive, except when the batch is placed - * very low in the GTT. Ensure this doesn't happen. - * - * Note that actual hangs have only been observed on gen7, but for - * paranoia do it everywhere. - */ - if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0) - vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; - - return vma->obj; + return file_priv->bsd_engine; } #define I915_USER_RINGS (4) @@ -1367,31 +1559,31 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = { [I915_EXEC_VEBOX] = VECS }; -static int -eb_select_ring(struct drm_i915_private *dev_priv, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args, - struct intel_engine_cs **ring) +static struct intel_engine_cs * +eb_select_engine(struct drm_i915_private *dev_priv, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args) { unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; + struct intel_engine_cs *engine; if (user_ring_id > I915_USER_RINGS) { DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); - return -EINVAL; + return NULL; } if ((user_ring_id != I915_EXEC_BSD) && ((args->flags & I915_EXEC_BSD_MASK) != 0)) { DRM_DEBUG("execbuf with non bsd ring but with invalid " "bsd dispatch flags: %d\n", (int)(args->flags)); - return -EINVAL; + return NULL; } if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; if (bsd_idx == I915_EXEC_BSD_DEFAULT) { - bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file); + bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file); } else if (bsd_idx >= I915_EXEC_BSD_RING1 && bsd_idx <= I915_EXEC_BSD_RING2) { bsd_idx >>= I915_EXEC_BSD_SHIFT; @@ -1399,20 +1591,20 @@ eb_select_ring(struct drm_i915_private *dev_priv, } else { DRM_DEBUG("execbuf with unknown bsd ring: %u\n", bsd_idx); - return -EINVAL; + return NULL; } - *ring = &dev_priv->engine[_VCS(bsd_idx)]; + engine = &dev_priv->engine[_VCS(bsd_idx)]; } else { - *ring = &dev_priv->engine[user_ring_map[user_ring_id]]; + engine = &dev_priv->engine[user_ring_map[user_ring_id]]; } - if (!intel_engine_initialized(*ring)) { + if (!intel_engine_initialized(engine)) { DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id); - return -EINVAL; + return NULL; } - return 0; + return engine; } static int @@ -1423,9 +1615,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_i915_gem_request *req = NULL; struct eb_vmas *eb; - struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; struct intel_engine_cs *engine; struct i915_gem_context *ctx; @@ -1454,9 +1644,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->flags & I915_EXEC_IS_PINNED) dispatch_flags |= I915_DISPATCH_PINNED; - ret = eb_select_ring(dev_priv, file, 
args, &engine); - if (ret) - return ret; + engine = eb_select_engine(dev_priv, file, args); + if (!engine) + return -EINVAL; if (args->buffer_count < 1) { DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); @@ -1496,7 +1686,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } - i915_gem_context_reference(ctx); + i915_gem_context_get(ctx); if (ctx->ppgtt) vm = &ctx->ppgtt->base; @@ -1505,9 +1695,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, memset(¶ms_master, 0x00, sizeof(params_master)); - eb = eb_create(args); + eb = eb_create(dev_priv, args); if (eb == NULL) { - i915_gem_context_unreference(ctx); + i915_gem_context_put(ctx); mutex_unlock(&dev->struct_mutex); ret = -ENOMEM; goto pre_mutex_err; @@ -1519,7 +1709,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = eb_get_batch(eb); + params->batch = eb_get_batch(eb); /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; @@ -1543,34 +1733,34 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Set the pending read domains for the batch buffer to COMMAND */ - if (batch_obj->base.pending_write_domain) { + if (params->batch->obj->base.pending_write_domain) { DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); ret = -EINVAL; goto err; } + if (args->batch_start_offset > params->batch->size || + args->batch_len > params->batch->size - args->batch_start_offset) { + DRM_DEBUG("Attempting to use out-of-bounds batch\n"); + ret = -EINVAL; + goto err; + } params->args_batch_start_offset = args->batch_start_offset; - if (i915_needs_cmd_parser(engine) && args->batch_len) { - struct drm_i915_gem_object *parsed_batch_obj; - - parsed_batch_obj = i915_gem_execbuffer_parse(engine, - &shadow_exec_entry, - eb, - batch_obj, - args->batch_start_offset, - args->batch_len, - drm_is_current_master(file)); - if (IS_ERR(parsed_batch_obj)) { - ret = PTR_ERR(parsed_batch_obj); + if (intel_engine_needs_cmd_parser(engine) && args->batch_len) { + struct i915_vma *vma; + + vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry, + params->batch->obj, + eb, + args->batch_start_offset, + args->batch_len, + drm_is_current_master(file)); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto err; } - /* - * parsed_batch_obj == batch_obj means batch not fully parsed: - * Accept, but don't promote to secure. - */ - - if (parsed_batch_obj != batch_obj) { + if (vma) { /* * Batch parsed and accepted: * @@ -1582,16 +1772,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ dispatch_flags |= I915_DISPATCH_SECURE; params->args_batch_start_offset = 0; - batch_obj = parsed_batch_obj; + params->batch = vma; } } - batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. * hsw should have this fixed, but bdw mucks it up again. */ if (dispatch_flags & I915_DISPATCH_SECURE) { + struct drm_i915_gem_object *obj = params->batch->obj; + struct i915_vma *vma; + /* * So on first glance it looks freaky that we pin the batch here * outside of the reservation loop. But: @@ -1602,22 +1795,31 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * fitting due to fragmentation. * So this is actually safe. 
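/*
 * Illustrative model, not part of the patch: the out-of-bounds batch check
 * added above.  The comparisons are ordered so that the subtraction only
 * happens once batch_start_offset is known to be within the object, which
 * keeps an oversized batch_len from wrapping past the end of the buffer.
 */
#include <stdbool.h>
#include <stdint.h>

static bool model_batch_in_bounds(uint64_t start, uint64_t len, uint64_t size)
{
	if (start > size)
		return false;
	return len <= size - start;
}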
*/ - ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0); - if (ret) + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto err; + } - params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj); - } else - params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); + params->batch = vma; + } /* Allocate a request for this batch buffer nice and early. */ - req = i915_gem_request_alloc(engine, ctx); - if (IS_ERR(req)) { - ret = PTR_ERR(req); + params->request = i915_gem_request_alloc(engine, ctx); + if (IS_ERR(params->request)) { + ret = PTR_ERR(params->request); goto err_batch_unpin; } - ret = i915_gem_request_add_to_client(req, file); + /* Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when this + * request is retired will the the batch_obj be moved onto the + * inactive_list and lose its active reference. Hence we do not need + * to explicitly hold another reference here. + */ + params->request->batch = params->batch; + + ret = i915_gem_request_add_to_client(params->request, file); if (ret) goto err_request; @@ -1631,13 +1833,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->file = file; params->engine = engine; params->dispatch_flags = dispatch_flags; - params->batch_obj = batch_obj; params->ctx = ctx; - params->request = req; - ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); + ret = execbuf_submit(params, args, &eb->vmas); err_request: - i915_gem_execbuffer_retire_commands(params); + __i915_add_request(params->request, ret == 0); err_batch_unpin: /* @@ -1647,11 +1847,10 @@ err_batch_unpin: * active. */ if (dispatch_flags & I915_DISPATCH_SECURE) - i915_gem_object_ggtt_unpin(batch_obj); - + i915_vma_unpin(params->batch); err: /* the request owns the ref now */ - i915_gem_context_unreference(ctx); + i915_gem_context_put(ctx); eb_destroy(eb); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 251d7a95af89..8df1fa7234e8 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -55,226 +55,228 @@ * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed. */ -static void i965_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +#define pipelined 0 + +static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; + u64 val; - if (INTEL_INFO(dev)->gen >= 6) { - fence_reg_lo = FENCE_REG_GEN6_LO(reg); - fence_reg_hi = FENCE_REG_GEN6_HI(reg); + if (INTEL_INFO(fence->i915)->gen >= 6) { + fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); + fence_reg_hi = FENCE_REG_GEN6_HI(fence->id); fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT; + } else { - fence_reg_lo = FENCE_REG_965_LO(reg); - fence_reg_hi = FENCE_REG_965_HI(reg); + fence_reg_lo = FENCE_REG_965_LO(fence->id); + fence_reg_hi = FENCE_REG_965_HI(fence->id); fence_pitch_shift = I965_FENCE_PITCH_SHIFT; } - /* To w/a incoherency with non-atomic 64-bit register updates, - * we split the 64-bit update into two 32-bit writes. In order - * for a partial fence not to be evaluated between writes, we - * precede the update with write to turn off the fence register, - * and only enable the fence as the last step. - * - * For extra levels of paranoia, we make sure each step lands - * before applying the next step. 
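/*
 * Illustrative sketch, not part of the patch: the update ordering the fence
 * comment above describes.  The accessors are hypothetical stand-ins for
 * I915_WRITE/POSTING_READ; the point is that the old fence is disabled and
 * flushed out before the new 64-bit value is written high dword first, so a
 * half-updated register can never be sampled with its valid bit set.
 */
#include <stdint.h>

extern void mmio_write32(uint32_t reg, uint32_t val);	/* assumed helper */
extern uint32_t mmio_read32(uint32_t reg);		/* assumed helper */

static void model_write_fence64(uint32_t lo_reg, uint32_t hi_reg, uint64_t val)
{
	mmio_write32(lo_reg, 0);		/* turn the old fence off */
	(void)mmio_read32(lo_reg);		/* posting read: step has landed */

	mmio_write32(hi_reg, val >> 32);
	mmio_write32(lo_reg, (uint32_t)val);	/* valid bit lives in the low dword */
	(void)mmio_read32(lo_reg);
}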
- */ - I915_WRITE(fence_reg_lo, 0); - POSTING_READ(fence_reg_lo); - - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); - uint64_t val; - - /* Adjust fence size to match tiled area */ - if (obj->tiling_mode != I915_TILING_NONE) { - uint32_t row_size = obj->stride * - (obj->tiling_mode == I915_TILING_Y ? 32 : 8); - size = (size / row_size) * row_size; - } - - val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & - 0xfffff000) << 32; - val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; - val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); + u32 row_size = stride * (is_y_tiled ? 32 : 8); + u32 size = rounddown((u32)vma->node.size, row_size); + + val = ((vma->node.start + size - 4096) & 0xfffff000) << 32; + val |= vma->node.start & 0xfffff000; + val |= (u64)((stride / 128) - 1) << fence_pitch_shift; + if (is_y_tiled) + val |= BIT(I965_FENCE_TILING_Y_SHIFT); val |= I965_FENCE_REG_VALID; + } - I915_WRITE(fence_reg_hi, val >> 32); - POSTING_READ(fence_reg_hi); + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; - I915_WRITE(fence_reg_lo, val); + /* To w/a incoherency with non-atomic 64-bit register updates, + * we split the 64-bit update into two 32-bit writes. In order + * for a partial fence not to be evaluated between writes, we + * precede the update with write to turn off the fence register, + * and only enable the fence as the last step. + * + * For extra levels of paranoia, we make sure each step lands + * before applying the next step. + */ + I915_WRITE(fence_reg_lo, 0); + POSTING_READ(fence_reg_lo); + + I915_WRITE(fence_reg_hi, upper_32_bits(val)); + I915_WRITE(fence_reg_lo, lower_32_bits(val)); POSTING_READ(fence_reg_lo); - } else { - I915_WRITE(fence_reg_hi, 0); - POSTING_READ(fence_reg_hi); } } -static void i915_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void i915_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 val; - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); int pitch_val; int tile_width; - WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || - (size & -size) != size || - (i915_gem_obj_ggtt_offset(obj) & (size - 1)), - "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); + WARN((vma->node.start & ~I915_FENCE_START_MASK) || + !is_power_of_2(vma->node.size) || + (vma->node.start & (vma->node.size - 1)), + "object 0x%08llx [fenceable? 
%d] not 1M or pot-size (0x%08llx) aligned\n", + vma->node.start, + i915_vma_is_map_and_fenceable(vma), + vma->node.size); - if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) + if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915)) tile_width = 128; else tile_width = 512; /* Note: pitch better be a power of two tile widths */ - pitch_val = obj->stride / tile_width; + pitch_val = stride / tile_width; pitch_val = ffs(pitch_val) - 1; - val = i915_gem_obj_ggtt_offset(obj); - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(size); + val = vma->node.start; + if (is_y_tiled) + val |= BIT(I830_FENCE_TILING_Y_SHIFT); + val |= I915_FENCE_SIZE_BITS(vma->node.size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - } else - val = 0; + } + + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; + i915_reg_t reg = FENCE_REG(fence->id); - I915_WRITE(FENCE_REG(reg), val); - POSTING_READ(FENCE_REG(reg)); + I915_WRITE(reg, val); + POSTING_READ(reg); + } } -static void i830_write_fence_reg(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void i830_write_fence_reg(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t val; + u32 val; - if (obj) { - u32 size = i915_gem_obj_ggtt_size(obj); - uint32_t pitch_val; + val = 0; + if (vma) { + unsigned int tiling = i915_gem_object_get_tiling(vma->obj); + bool is_y_tiled = tiling == I915_TILING_Y; + unsigned int stride = i915_gem_object_get_stride(vma->obj); + u32 pitch_val; - WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || - (size & -size) != size || - (i915_gem_obj_ggtt_offset(obj) & (size - 1)), - "object 0x%08llx not 512K or pot-size 0x%08x aligned\n", - i915_gem_obj_ggtt_offset(obj), size); + WARN((vma->node.start & ~I830_FENCE_START_MASK) || + !is_power_of_2(vma->node.size) || + (vma->node.start & (vma->node.size - 1)), + "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n", + vma->node.start, vma->node.size); - pitch_val = obj->stride / 128; + pitch_val = stride / 128; pitch_val = ffs(pitch_val) - 1; - val = i915_gem_obj_ggtt_offset(obj); - if (obj->tiling_mode == I915_TILING_Y) - val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I830_FENCE_SIZE_BITS(size); + val = vma->node.start; + if (is_y_tiled) + val |= BIT(I830_FENCE_TILING_Y_SHIFT); + val |= I830_FENCE_SIZE_BITS(vma->node.size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - } else - val = 0; + } - I915_WRITE(FENCE_REG(reg), val); - POSTING_READ(FENCE_REG(reg)); -} + if (!pipelined) { + struct drm_i915_private *dev_priv = fence->i915; + i915_reg_t reg = FENCE_REG(fence->id); -inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) -{ - return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; + I915_WRITE(reg, val); + POSTING_READ(reg); + } } -static void i915_gem_write_fence(struct drm_device *dev, int reg, - struct drm_i915_gem_object *obj) +static void fence_write(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(dev); - - /* Ensure that all CPU reads are completed before installing a fence - * and all writes before removing the fence. + /* Previous access through the fence register is marshalled by + * the mb() inside the fault handlers (i915_gem_release_mmaps) + * and explicitly managed for internal users. 
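/*
 * Illustrative model, not part of the patch: the pitch encoding used by
 * i915_write_fence_reg()/i830_write_fence_reg() above.  The register field
 * holds log2 of the stride measured in tile widths, hence the
 * ffs(stride / tile_width) - 1.  For example a 2048-byte stride with
 * 512-byte tiles is 4 tile widths and encodes as 2.
 */
static unsigned int model_fence_pitch_field(unsigned int stride,
					    unsigned int tile_width)
{
	unsigned int pitch_in_tiles = stride / tile_width; /* power of two */

	return __builtin_ffs((int)pitch_in_tiles) - 1;
}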
*/ - if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) - mb(); - - WARN(obj && (!obj->stride || !obj->tiling_mode), - "bogus fence setup with stride: 0x%x, tiling mode: %i\n", - obj->stride, obj->tiling_mode); - - if (IS_GEN2(dev)) - i830_write_fence_reg(dev, reg, obj); - else if (IS_GEN3(dev)) - i915_write_fence_reg(dev, reg, obj); - else if (INTEL_INFO(dev)->gen >= 4) - i965_write_fence_reg(dev, reg, obj); - - /* And similarly be paranoid that no direct access to this region - * is reordered to before the fence is installed. + + if (IS_GEN2(fence->i915)) + i830_write_fence_reg(fence, vma); + else if (IS_GEN3(fence->i915)) + i915_write_fence_reg(fence, vma); + else + i965_write_fence_reg(fence, vma); + + /* Access through the fenced region afterwards is + * ordered by the posting reads whilst writing the registers. */ - if (i915_gem_object_needs_mb(obj)) - mb(); -} -static inline int fence_number(struct drm_i915_private *dev_priv, - struct drm_i915_fence_reg *fence) -{ - return fence - dev_priv->fence_regs; + fence->dirty = false; } -static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, - struct drm_i915_fence_reg *fence, - bool enable) +static int fence_update(struct drm_i915_fence_reg *fence, + struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - int reg = fence_number(dev_priv, fence); - - i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); + int ret; - if (enable) { - obj->fence_reg = reg; - fence->obj = obj; - list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); - } else { - obj->fence_reg = I915_FENCE_REG_NONE; - fence->obj = NULL; - list_del_init(&fence->lru_list); - } - obj->fence_dirty = false; -} + if (vma) { + if (!i915_vma_is_map_and_fenceable(vma)) + return -EINVAL; -static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) -{ - if (obj->tiling_mode) - i915_gem_release_mmap(obj); + if (WARN(!i915_gem_object_get_stride(vma->obj) || + !i915_gem_object_get_tiling(vma->obj), + "bogus fence setup with stride: 0x%x, tiling mode: %i\n", + i915_gem_object_get_stride(vma->obj), + i915_gem_object_get_tiling(vma->obj))) + return -EINVAL; - /* As we do not have an associated fence register, we will force - * a tiling change if we ever need to acquire one. - */ - obj->fence_dirty = false; - obj->fence_reg = I915_FENCE_REG_NONE; -} + ret = i915_gem_active_retire(&vma->last_fence, + &vma->obj->base.dev->struct_mutex); + if (ret) + return ret; + } -static int -i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) -{ - if (obj->last_fenced_req) { - int ret = i915_wait_request(obj->last_fenced_req); + if (fence->vma) { + ret = i915_gem_active_retire(&fence->vma->last_fence, + &fence->vma->obj->base.dev->struct_mutex); if (ret) return ret; + } + + if (fence->vma && fence->vma != vma) { + /* Ensure that all userspace CPU access is completed before + * stealing the fence. 
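/*
 * Illustrative model, not part of the patch: how fence_update() above
 * reassigns a register.  The register to reuse is picked in LRU order (see
 * fence_find() below); its previous owner has its GTT mmaps zapped so
 * userspace refaults and resynchronises, then the register is rewritten for
 * the new vma and moved to the back of the LRU.  The driver additionally
 * returns -EAGAIN when a pending flip will soon release a register.
 */
struct model_fence {
	int pin_count;
	int owner;			/* vma id, or -1 when unused */
};

static int model_find_fence(const struct model_fence *lru, int count)
{
	int i;

	for (i = 0; i < count; i++)	/* index 0 = least recently used */
		if (!lru[i].pin_count)
			return i;	/* free, or stealable from lru[i].owner */

	return -1;			/* all pinned: -EDEADLK in the driver */
}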
+ */ + i915_gem_release_mmap(fence->vma->obj); + + fence->vma->fence = NULL; + fence->vma = NULL; + + list_move(&fence->link, &fence->i915->mm.fence_list); + } + + fence_write(fence, vma); + + if (vma) { + if (fence->vma != vma) { + vma->fence = fence; + fence->vma = vma; + } - i915_gem_request_assign(&obj->last_fenced_req, NULL); + list_move_tail(&fence->link, &fence->i915->mm.fence_list); } return 0; } /** - * i915_gem_object_put_fence - force-remove fence for an object - * @obj: object to map through a fence reg + * i915_vma_put_fence - force-remove fence for a VMA + * @vma: vma to map linearly (not through a fence reg) * * This function force-removes any fence from the given object, which is useful * if the kernel wants to do untiled GTT access. @@ -284,70 +286,40 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) * 0 on success, negative error code on failure. */ int -i915_gem_object_put_fence(struct drm_i915_gem_object *obj) +i915_vma_put_fence(struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct drm_i915_fence_reg *fence; - int ret; + struct drm_i915_fence_reg *fence = vma->fence; - ret = i915_gem_object_wait_fence(obj); - if (ret) - return ret; - - if (obj->fence_reg == I915_FENCE_REG_NONE) + if (!fence) return 0; - fence = &dev_priv->fence_regs[obj->fence_reg]; - - if (WARN_ON(fence->pin_count)) + if (fence->pin_count) return -EBUSY; - i915_gem_object_fence_lost(obj); - i915_gem_object_update_fence(obj, fence, false); - - return 0; + return fence_update(fence, NULL); } -static struct drm_i915_fence_reg * -i915_find_fence_reg(struct drm_device *dev) +static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_fence_reg *reg, *avail; - int i; - - /* First try to find a free reg */ - avail = NULL; - for (i = 0; i < dev_priv->num_fence_regs; i++) { - reg = &dev_priv->fence_regs[i]; - if (!reg->obj) - return reg; - - if (!reg->pin_count) - avail = reg; - } - - if (avail == NULL) - goto deadlock; + struct drm_i915_fence_reg *fence; - /* None available, try to steal one or wait for a user to finish */ - list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { - if (reg->pin_count) + list_for_each_entry(fence, &dev_priv->mm.fence_list, link) { + if (fence->pin_count) continue; - return reg; + return fence; } -deadlock: /* Wait for completion of pending flips which consume fences */ - if (intel_has_pending_fb_unpin(dev)) + if (intel_has_pending_fb_unpin(&dev_priv->drm)) return ERR_PTR(-EAGAIN); return ERR_PTR(-EDEADLK); } /** - * i915_gem_object_get_fence - set up fencing for an object - * @obj: object to map through a fence reg + * i915_vma_get_fence - set up fencing for a vma + * @vma: vma to map through a fence reg * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -364,103 +336,27 @@ deadlock: * 0 on success, negative error code on failure. */ int -i915_gem_object_get_fence(struct drm_i915_gem_object *obj) +i915_vma_get_fence(struct i915_vma *vma) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - bool enable = obj->tiling_mode != I915_TILING_NONE; - struct drm_i915_fence_reg *reg; - int ret; - - /* Have we updated the tiling parameters upon the object and so - * will need to serialise the write to the associated fence register? 
- */ - if (obj->fence_dirty) { - ret = i915_gem_object_wait_fence(obj); - if (ret) - return ret; - } + struct drm_i915_fence_reg *fence; + struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; /* Just update our place in the LRU if our fence is getting reused. */ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - reg = &dev_priv->fence_regs[obj->fence_reg]; - if (!obj->fence_dirty) { - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); + if (vma->fence) { + fence = vma->fence; + if (!fence->dirty) { + list_move_tail(&fence->link, + &fence->i915->mm.fence_list); return 0; } - } else if (enable) { - if (WARN_ON(!obj->map_and_fenceable)) - return -EINVAL; - - reg = i915_find_fence_reg(dev); - if (IS_ERR(reg)) - return PTR_ERR(reg); - - if (reg->obj) { - struct drm_i915_gem_object *old = reg->obj; - - ret = i915_gem_object_wait_fence(old); - if (ret) - return ret; - - i915_gem_object_fence_lost(old); - } + } else if (set) { + fence = fence_find(to_i915(vma->vm->dev)); + if (IS_ERR(fence)) + return PTR_ERR(fence); } else return 0; - i915_gem_object_update_fence(obj, reg, enable); - - return 0; -} - -/** - * i915_gem_object_pin_fence - pin fencing state - * @obj: object to pin fencing for - * - * This pins the fencing state (whether tiled or untiled) to make sure the - * object is ready to be used as a scanout target. Fencing status must be - * synchronize first by calling i915_gem_object_get_fence(): - * - * The resulting fence pin reference must be released again with - * i915_gem_object_unpin_fence(). - * - * Returns: - * - * True if the object has a fence, false otherwise. - */ -bool -i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); - - WARN_ON(!ggtt_vma || - dev_priv->fence_regs[obj->fence_reg].pin_count > - ggtt_vma->pin_count); - dev_priv->fence_regs[obj->fence_reg].pin_count++; - return true; - } else - return false; -} - -/** - * i915_gem_object_unpin_fence - unpin fencing state - * @obj: object to unpin fencing for - * - * This releases the fence pin reference acquired through - * i915_gem_object_pin_fence. It will handle both objects with and without an - * attached fence correctly, callers do not need to distinguish this. - */ -void -i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) -{ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); - dev_priv->fence_regs[obj->fence_reg].pin_count--; - } + return fence_update(fence, set); } /** @@ -477,17 +373,16 @@ void i915_gem_restore_fences(struct drm_device *dev) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + struct i915_vma *vma = reg->vma; /* * Commit delayed tiling changes if we have an object still * attached to the fence, otherwise just clear the fence. 
*/ - if (reg->obj) { - i915_gem_object_update_fence(reg->obj, reg, - reg->obj->tiling_mode); - } else { - i915_gem_write_fence(dev, i, NULL); - } + if (vma && !i915_gem_object_is_tiled(vma->obj)) + vma = NULL; + + fence_update(reg, vma); } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 10f1e32767e6..b90fdcee992a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -170,11 +170,13 @@ static int ppgtt_bind_vma(struct i915_vma *vma, { u32 pte_flags = 0; + vma->pages = vma->obj->pages; + /* Currently applicable only to VLV */ if (vma->obj->gt_ro) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, + vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, cache_level, pte_flags); return 0; @@ -184,7 +186,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma) { vma->vm->clear_range(vma->vm, vma->node.start, - vma->obj->base.size, + vma->size, true); } @@ -669,6 +671,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, unsigned entry, dma_addr_t addr) { + struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; int ret; @@ -678,13 +681,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry)); - intel_ring_emit(engine, upper_32_bits(addr)); - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry)); - intel_ring_emit(engine, lower_32_bits(addr)); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry)); + intel_ring_emit(ring, upper_32_bits(addr)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry)); + intel_ring_emit(ring, lower_32_bits(addr)); + intel_ring_advance(ring); return 0; } @@ -1660,11 +1663,12 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { + struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; int ret; /* NB: TLBs must be flushed and invalidated before a switch */ - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH); if (ret) return ret; @@ -1672,13 +1676,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); - intel_ring_emit(engine, PP_DIR_DCLV_2G); - intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); - intel_ring_emit(engine, get_pd_offset(ppgtt)); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1686,11 +1690,12 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { + struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; int ret; /* NB: TLBs must be flushed and 
invalidated before a switch */ - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH); if (ret) return ret; @@ -1698,17 +1703,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); - intel_ring_emit(engine, PP_DIR_DCLV_2G); - intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); - intel_ring_emit(engine, get_pd_offset(ppgtt)); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(ring, PP_DIR_DCLV_2G); + intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine)); + intel_ring_emit(ring, get_pd_offset(ppgtt)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* XXX: RCS is the only one to auto invalidate the TLBs? */ if (engine->id != RCS) { - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH); if (ret) return ret; } @@ -2009,7 +2014,7 @@ alloc: 0, ggtt->base.total, DRM_MM_TOPDOWN); if (ret == -ENOSPC && !retried) { - ret = i915_gem_evict_something(dev, &ggtt->base, + ret = i915_gem_evict_something(&ggtt->base, GEN6_PD_SIZE, GEN6_PD_ALIGN, I915_CACHE_NONE, 0, ggtt->base.total, @@ -2101,11 +2106,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) return 0; } -static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_private *dev_priv) { - ppgtt->base.dev = dev; + ppgtt->base.dev = &dev_priv->drm; - if (INTEL_INFO(dev)->gen < 8) + if (INTEL_INFO(dev_priv)->gen < 8) return gen6_ppgtt_init(ppgtt); else return gen8_ppgtt_init(ppgtt); @@ -2115,9 +2121,9 @@ static void i915_address_space_init(struct i915_address_space *vm, struct drm_i915_private *dev_priv) { drm_mm_init(&vm->mm, vm->start, vm->total); - vm->dev = &dev_priv->drm; INIT_LIST_HEAD(&vm->active_list); INIT_LIST_HEAD(&vm->inactive_list); + INIT_LIST_HEAD(&vm->unbound_list); list_add_tail(&vm->global_link, &dev_priv->vm_list); } @@ -2140,15 +2146,17 @@ static void gtt_write_workarounds(struct drm_device *dev) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); } -static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt, + struct drm_i915_private *dev_priv, + struct drm_i915_file_private *file_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - int ret = 0; + int ret; - ret = __hw_ppgtt_init(dev, ppgtt); + ret = __hw_ppgtt_init(ppgtt, dev_priv); if (ret == 0) { kref_init(&ppgtt->ref); i915_address_space_init(&ppgtt->base, dev_priv); + ppgtt->base.file = file_priv; } return ret; @@ -2180,7 +2188,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev) } struct i915_hw_ppgtt * -i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) +i915_ppgtt_create(struct drm_i915_private *dev_priv, + struct drm_i915_file_private *fpriv) { struct i915_hw_ppgtt *ppgtt; int ret; @@ -2189,14 +2198,12 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) if (!ppgtt) return ERR_PTR(-ENOMEM); - ret = i915_ppgtt_init(dev, ppgtt); + ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv); if (ret) { kfree(ppgtt); return ERR_PTR(ret); } - ppgtt->file_priv = fpriv; - 
trace_i915_ppgtt_create(&ppgtt->base); return ppgtt; @@ -2209,9 +2216,10 @@ void i915_ppgtt_release(struct kref *kref) trace_i915_ppgtt_release(&ppgtt->base); - /* vmas should already be unbound */ + /* vmas should already be unbound and destroyed */ WARN_ON(!list_empty(&ppgtt->base.active_list)); WARN_ON(!list_empty(&ppgtt->base.inactive_list)); + WARN_ON(!list_empty(&ppgtt->base.unbound_list)); list_del(&ppgtt->base.global_link); drm_mm_takedown(&ppgtt->base.mm); @@ -2220,47 +2228,21 @@ void i915_ppgtt_release(struct kref *kref) kfree(ppgtt); } -extern int intel_iommu_gfx_mapped; /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ -static bool needs_idle_maps(struct drm_device *dev) +static bool needs_idle_maps(struct drm_i915_private *dev_priv) { #ifdef CONFIG_INTEL_IOMMU /* Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) + if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped) return true; #endif return false; } -static bool do_idling(struct drm_i915_private *dev_priv) -{ - struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool ret = dev_priv->mm.interruptible; - - if (unlikely(ggtt->do_idle_maps)) { - dev_priv->mm.interruptible = false; - if (i915_gem_wait_for_idle(dev_priv)) { - DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); - /* Wait a bit, in hopes it avoids the hang */ - udelay(10); - } - } - - return ret; -} - -static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) -{ - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - if (unlikely(ggtt->do_idle_maps)) - dev_priv->mm.interruptible = interruptible; -} - void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; @@ -2638,8 +2620,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (obj->gt_ro) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, - vma->node.start, + vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, cache_level, pte_flags); /* @@ -2647,7 +2628,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally * upgrade to both bound if we bind either to avoid double-binding. 
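/*
 * Illustrative model, not part of the patch: the bind-flag convention the
 * comment above refers to.  In the plain GGTT path the same PTEs back both
 * views, so both flags are set at once; in the aliasing variant just below,
 * each flag selects which page tables actually receive the PTE writes.
 * The constants are stand-ins for I915_VMA_GLOBAL_BIND/I915_VMA_LOCAL_BIND.
 */
#define MODEL_GLOBAL_BIND	(1u << 0)
#define MODEL_LOCAL_BIND	(1u << 1)

static void model_aliasing_bind(unsigned int flags,
				unsigned int *ggtt_inserts,
				unsigned int *ppgtt_inserts)
{
	if (flags & MODEL_GLOBAL_BIND)
		(*ggtt_inserts)++;	/* vm->insert_entries() on the GGTT */
	if (flags & MODEL_LOCAL_BIND)
		(*ppgtt_inserts)++;	/* appgtt->base.insert_entries() */
}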
*/ - vma->bound |= GLOBAL_BIND | LOCAL_BIND; + vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; return 0; } @@ -2669,19 +2650,17 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; - if (flags & GLOBAL_BIND) { + if (flags & I915_VMA_GLOBAL_BIND) { vma->vm->insert_entries(vma->vm, - vma->ggtt_view.pages, - vma->node.start, + vma->pages, vma->node.start, cache_level, pte_flags); } - if (flags & LOCAL_BIND) { + if (flags & I915_VMA_LOCAL_BIND) { struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt; appgtt->base.insert_entries(&appgtt->base, - vma->ggtt_view.pages, - vma->node.start, + vma->pages, vma->node.start, cache_level, pte_flags); } @@ -2690,42 +2669,36 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, static void ggtt_unbind_vma(struct i915_vma *vma) { - struct drm_device *dev = vma->vm->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = vma->obj; - const uint64_t size = min_t(uint64_t, - obj->base.size, - vma->node.size); + struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt; + const u64 size = min(vma->size, vma->node.size); - if (vma->bound & GLOBAL_BIND) { + if (vma->flags & I915_VMA_GLOBAL_BIND) vma->vm->clear_range(vma->vm, - vma->node.start, - size, + vma->node.start, size, true); - } - - if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) { - struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; + if (vma->flags & I915_VMA_LOCAL_BIND && appgtt) appgtt->base.clear_range(&appgtt->base, - vma->node.start, - size, + vma->node.start, size, true); - } } void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - bool interruptible; + struct i915_ggtt *ggtt = &dev_priv->ggtt; - interruptible = do_idling(dev_priv); + if (unlikely(ggtt->do_idle_maps)) { + if (i915_gem_wait_for_idle(dev_priv, false)) { + DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); + /* Wait a bit, in hopes it avoids the hang */ + udelay(10); + } + } dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents, PCI_DMA_BIDIRECTIONAL); - - undo_idling(dev_priv, interruptible); } static void i915_gtt_color_adjust(struct drm_mm_node *node, @@ -2736,19 +2709,14 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, if (node->color != color) *start += 4096; - if (!list_empty(&node->node_list)) { - node = list_entry(node->node_list.next, - struct drm_mm_node, - node_list); - if (node->allocated && node->color != color) - *end -= 4096; - } + node = list_first_entry_or_null(&node->node_list, + struct drm_mm_node, + node_list); + if (node && node->allocated && node->color != color) + *end -= 4096; } -static int i915_gem_setup_global_gtt(struct drm_device *dev, - u64 start, - u64 mappable_end, - u64 end) +int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) { /* Let GEM Manage all of the aperture. * @@ -2759,48 +2727,15 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, * aperture. One page should be enough to keep any prefetching inside * of the aperture. 
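/*
 * Illustrative model, not part of the patch: the colouring rule enforced by
 * i915_gtt_color_adjust() above.  When a neighbouring node has a different
 * colour (cache level, in the GGTT), the available hole is shrunk by one
 * 4 KiB guard page on that side so differently-coloured objects never sit
 * flush against each other.
 */
#include <stdint.h>

static void model_color_adjust(int prev_color, int next_color, int color,
			       uint64_t *start, uint64_t *end)
{
	if (prev_color != color)
		*start += 4096;
	if (next_color != color)
		*end -= 4096;
}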
*/ - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_mm_node *entry; - struct drm_i915_gem_object *obj; unsigned long hole_start, hole_end; + struct drm_mm_node *entry; int ret; - BUG_ON(mappable_end > end); - - ggtt->base.start = start; - - /* Subtract the guard page before address space initialization to - * shrink the range used by drm_mm */ - ggtt->base.total = end - start - PAGE_SIZE; - i915_address_space_init(&ggtt->base, dev_priv); - ggtt->base.total += PAGE_SIZE; - ret = intel_vgt_balloon(dev_priv); if (ret) return ret; - if (!HAS_LLC(dev)) - ggtt->base.mm.color_adjust = i915_gtt_color_adjust; - - /* Mark any preallocated objects as occupied */ - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base); - - DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n", - i915_gem_obj_ggtt_offset(obj), obj->base.size); - - WARN_ON(i915_gem_obj_ggtt_bound(obj)); - ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); - if (ret) { - DRM_DEBUG_KMS("Reservation failed: %i\n", ret); - return ret; - } - vma->bound |= GLOBAL_BIND; - __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->vm_link, &ggtt->base.inactive_list); - } - /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", @@ -2810,18 +2745,19 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, } /* And finally clear the reserved guard page */ - ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true); + ggtt->base.clear_range(&ggtt->base, + ggtt->base.total - PAGE_SIZE, PAGE_SIZE, + true); - if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { + if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) return -ENOMEM; - ret = __hw_ppgtt_init(dev, ppgtt); + ret = __hw_ppgtt_init(ppgtt, dev_priv); if (ret) { - ppgtt->base.cleanup(&ppgtt->base); kfree(ppgtt); return ret; } @@ -2849,33 +2785,20 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, } /** - * i915_gem_init_ggtt - Initialize GEM for Global GTT - * @dev: DRM device - */ -void i915_gem_init_ggtt(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total); -} - -/** * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization - * @dev: DRM device + * @dev_priv: i915 device */ -void i915_ggtt_cleanup_hw(struct drm_device *dev) +void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - ppgtt->base.cleanup(&ppgtt->base); + kfree(ppgtt); } - i915_gem_cleanup_stolen(dev); + i915_gem_cleanup_stolen(&dev_priv->drm); if (drm_mm_initialized(&ggtt->base.mm)) { intel_vgt_deballoon(dev_priv); @@ -2885,6 +2808,9 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) } ggtt->base.cleanup(&ggtt->base); + + arch_phys_wc_del(ggtt->mtrr); + io_mapping_fini(&ggtt->mappable); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -2965,17 +2891,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) return (gen9_gmch_ctl - 0xf0 + 1) << 22; } -static int ggtt_probe_common(struct drm_device *dev, - 
size_t gtt_size) +static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct pci_dev *pdev = ggtt->base.dev->pdev; struct i915_page_scratch *scratch_page; - phys_addr_t ggtt_phys_addr; + phys_addr_t phys_addr; /* For Modern GENs the PTEs and register space are split in the BAR */ - ggtt_phys_addr = pci_resource_start(dev->pdev, 0) + - (pci_resource_len(dev->pdev, 0) / 2); + phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; /* * On BXT writes larger than 64 bit to the GTT pagetable range will be @@ -2984,16 +2907,16 @@ static int ggtt_probe_common(struct drm_device *dev, * resort to an uncached mapping. The WC issue is easily caught by the * readback check when writing GTT PTE entries. */ - if (IS_BROXTON(dev)) - ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size); + if (IS_BROXTON(ggtt->base.dev)) + ggtt->gsm = ioremap_nocache(phys_addr, size); else - ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size); + ggtt->gsm = ioremap_wc(phys_addr, size); if (!ggtt->gsm) { - DRM_ERROR("Failed to map the gtt page table\n"); + DRM_ERROR("Failed to map the ggtt page table\n"); return -ENOMEM; } - scratch_page = alloc_scratch_page(dev); + scratch_page = alloc_scratch_page(ggtt->base.dev); if (IS_ERR(scratch_page)) { DRM_ERROR("Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ @@ -3079,42 +3002,49 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); } +static void gen6_gmch_remove(struct i915_address_space *vm) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + + iounmap(ggtt->gsm); + free_scratch_page(vm->dev, vm->scratch_page); +} + static int gen8_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_device *dev = ggtt->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); + struct pci_dev *pdev = dev_priv->drm.pdev; + unsigned int size; u16 snb_gmch_ctl; - int ret; /* TODO: We're not aware of mappable constraints on gen8 yet */ - ggtt->mappable_base = pci_resource_start(dev->pdev, 2); - ggtt->mappable_end = pci_resource_len(dev->pdev, 2); + ggtt->mappable_base = pci_resource_start(pdev, 2); + ggtt->mappable_end = pci_resource_len(pdev, 2); - if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) - pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39))) + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl); - ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); - } else if (IS_CHERRYVIEW(dev)) { + size = gen8_get_total_gtt_size(snb_gmch_ctl); + } else if (IS_CHERRYVIEW(dev_priv)) { ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl); - ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl); + size = chv_get_total_gtt_size(snb_gmch_ctl); } else { ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl); - ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); + size = gen8_get_total_gtt_size(snb_gmch_ctl); } - ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT; + ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; - if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) + if (IS_CHERRYVIEW(dev_priv) || 
IS_BROXTON(dev_priv)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); - ret = ggtt_probe_common(dev, ggtt->size); - + ggtt->base.cleanup = gen6_gmch_remove; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; ggtt->base.insert_page = gen8_ggtt_insert_page; @@ -3126,57 +3056,65 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) if (IS_CHERRYVIEW(dev_priv)) ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; - return ret; + return ggtt_probe_common(ggtt, size); } static int gen6_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_device *dev = ggtt->base.dev; + struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); + struct pci_dev *pdev = dev_priv->drm.pdev; + unsigned int size; u16 snb_gmch_ctl; - int ret; - ggtt->mappable_base = pci_resource_start(dev->pdev, 2); - ggtt->mappable_end = pci_resource_len(dev->pdev, 2); + ggtt->mappable_base = pci_resource_start(pdev, 2); + ggtt->mappable_end = pci_resource_len(pdev, 2); /* 64/512MB is the current min/max we actually know of, but this is just * a coarse sanity check. */ - if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) { + if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) { DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end); return -ENXIO; } - if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) - pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); - ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl); - ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT; - ret = ggtt_probe_common(dev, ggtt->size); + size = gen6_get_total_gtt_size(snb_gmch_ctl); + ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT; ggtt->base.clear_range = gen6_ggtt_clear_range; ggtt->base.insert_page = gen6_ggtt_insert_page; ggtt->base.insert_entries = gen6_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->base.cleanup = gen6_gmch_remove; + + if (HAS_EDRAM(dev_priv)) + ggtt->base.pte_encode = iris_pte_encode; + else if (IS_HASWELL(dev_priv)) + ggtt->base.pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(dev_priv)) + ggtt->base.pte_encode = byt_pte_encode; + else if (INTEL_GEN(dev_priv) >= 7) + ggtt->base.pte_encode = ivb_pte_encode; + else + ggtt->base.pte_encode = snb_pte_encode; - return ret; + return ggtt_probe_common(ggtt, size); } -static void gen6_gmch_remove(struct i915_address_space *vm) +static void i915_gmch_remove(struct i915_address_space *vm) { - struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base); - - iounmap(ggtt->gsm); - free_scratch_page(vm->dev, vm->scratch_page); + intel_gmch_remove(); } static int i915_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_device *dev = ggtt->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev); int ret; ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL); @@ -3188,12 +3126,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, &ggtt->mappable_base, &ggtt->mappable_end); - ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm); + ggtt->do_idle_maps = needs_idle_maps(dev_priv); 
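/*
 * Illustrative model, not part of the patch: how the gen6/gen8 probes above
 * turn the PTE-table size decoded from SNB_GMCH_CTRL into an address-space
 * size.  Each PTE maps one page, so for example 2 MiB of 8-byte gen8 PTEs
 * covers 256K pages, i.e. a 1 GiB GGTT.
 */
#include <stdint.h>

static uint64_t model_ggtt_total(uint64_t pte_table_bytes,
				 unsigned int pte_size,
				 unsigned int page_shift)
{
	return (pte_table_bytes / pte_size) << page_shift;
}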
ggtt->base.insert_page = i915_ggtt_insert_page; ggtt->base.insert_entries = i915_ggtt_insert_entries; ggtt->base.clear_range = i915_ggtt_clear_range; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->base.cleanup = i915_gmch_remove; if (unlikely(ggtt->do_idle_maps)) DRM_INFO("applying Ironlake quirks for intel_iommu\n"); @@ -3201,65 +3140,40 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return 0; } -static void i915_gmch_remove(struct i915_address_space *vm) -{ - intel_gmch_remove(); -} - /** - * i915_ggtt_init_hw - Initialize GGTT hardware - * @dev: DRM device + * i915_ggtt_probe_hw - Probe GGTT hardware location + * @dev_priv: i915 device */ -int i915_ggtt_init_hw(struct drm_device *dev) +int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - if (INTEL_INFO(dev)->gen <= 5) { - ggtt->probe = i915_gmch_probe; - ggtt->base.cleanup = i915_gmch_remove; - } else if (INTEL_INFO(dev)->gen < 8) { - ggtt->probe = gen6_gmch_probe; - ggtt->base.cleanup = gen6_gmch_remove; - - if (HAS_EDRAM(dev)) - ggtt->base.pte_encode = iris_pte_encode; - else if (IS_HASWELL(dev)) - ggtt->base.pte_encode = hsw_pte_encode; - else if (IS_VALLEYVIEW(dev)) - ggtt->base.pte_encode = byt_pte_encode; - else if (INTEL_INFO(dev)->gen >= 7) - ggtt->base.pte_encode = ivb_pte_encode; - else - ggtt->base.pte_encode = snb_pte_encode; - } else { - ggtt->probe = gen8_gmch_probe; - ggtt->base.cleanup = gen6_gmch_remove; - } - - ggtt->base.dev = dev; - ggtt->base.is_ggtt = true; + ggtt->base.dev = &dev_priv->drm; - ret = ggtt->probe(ggtt); + if (INTEL_GEN(dev_priv) <= 5) + ret = i915_gmch_probe(ggtt); + else if (INTEL_GEN(dev_priv) < 8) + ret = gen6_gmch_probe(ggtt); + else + ret = gen8_gmch_probe(ggtt); if (ret) return ret; if ((ggtt->base.total - 1) >> 32) { DRM_ERROR("We never expected a Global GTT with more than 32bits" - "of address space! Found %lldM!\n", + " of address space! Found %lldM!\n", ggtt->base.total >> 20); ggtt->base.total = 1ULL << 32; ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); } - /* - * Initialise stolen early so that we may reserve preallocated - * objects for the BIOS to KMS transition. - */ - ret = i915_gem_init_stolen(dev); - if (ret) - goto out_gtt_cleanup; + if (ggtt->mappable_end > ggtt->base.total) { + DRM_ERROR("mappable aperture extends past end of GGTT," + " aperture=%llx, total=%llx\n", + ggtt->mappable_end, ggtt->base.total); + ggtt->mappable_end = ggtt->base.total; + } /* GMADR is the PCI mmio aperture into the global GTT. */ DRM_INFO("Memory usable by graphics device = %lluM\n", @@ -3272,16 +3186,55 @@ int i915_ggtt_init_hw(struct drm_device *dev) #endif return 0; +} + +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @dev_priv: i915 device + */ +int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) +{ + struct i915_ggtt *ggtt = &dev_priv->ggtt; + int ret; + + INIT_LIST_HEAD(&dev_priv->vm_list); + + /* Subtract the guard page before address space initialization to + * shrink the range used by drm_mm. 
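/*
 * Illustrative model, not part of the patch: the sanity clamps applied at
 * the end of i915_ggtt_probe_hw() above.  A reported GGTT larger than 4 GiB
 * is truncated to 4 GiB, and the mappable aperture is never allowed to
 * extend past the end of the GGTT.
 */
#include <stdint.h>

static void model_clamp_ggtt(uint64_t *total, uint64_t *mappable_end)
{
	if ((*total - 1) >> 32)			/* more than 32 bits of space? */
		*total = 1ull << 32;
	if (*mappable_end > *total)
		*mappable_end = *total;
}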
+ */ + ggtt->base.total -= PAGE_SIZE; + i915_address_space_init(&ggtt->base, dev_priv); + ggtt->base.total += PAGE_SIZE; + if (!HAS_LLC(dev_priv)) + ggtt->base.mm.color_adjust = i915_gtt_color_adjust; + + if (!io_mapping_init_wc(&dev_priv->ggtt.mappable, + dev_priv->ggtt.mappable_base, + dev_priv->ggtt.mappable_end)) { + ret = -EIO; + goto out_gtt_cleanup; + } + + ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end); + + /* + * Initialise stolen early so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(&dev_priv->drm); + if (ret) + goto out_gtt_cleanup; + + return 0; out_gtt_cleanup: ggtt->base.cleanup(&ggtt->base); - return ret; } -int i915_ggtt_enable_hw(struct drm_device *dev) +int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) + if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt()) return -EIO; return 0; @@ -3331,7 +3284,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct i915_hw_ppgtt *ppgtt; - if (vm->is_ggtt) + if (i915_is_ggtt(vm)) ppgtt = dev_priv->mm.aliasing_ppgtt; else ppgtt = i915_vm_to_ppgtt(vm); @@ -3344,65 +3297,155 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) i915_ggtt_flush(dev_priv); } +static void +i915_vma_retire(struct i915_gem_active *active, + struct drm_i915_gem_request *rq) +{ + const unsigned int idx = rq->engine->id; + struct i915_vma *vma = + container_of(active, struct i915_vma, last_read[idx]); + + GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); + + i915_vma_clear_active(vma, idx); + if (i915_vma_is_active(vma)) + return; + + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); + if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma))) + WARN_ON(i915_vma_unbind(vma)); +} + +void i915_vma_destroy(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->node.allocated); + GEM_BUG_ON(i915_vma_is_active(vma)); + GEM_BUG_ON(!i915_vma_is_closed(vma)); + GEM_BUG_ON(vma->fence); + + list_del(&vma->vm_link); + if (!i915_vma_is_ggtt(vma)) + i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); + + kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); +} + +void i915_vma_close(struct i915_vma *vma) +{ + GEM_BUG_ON(i915_vma_is_closed(vma)); + vma->flags |= I915_VMA_CLOSED; + + list_del_init(&vma->obj_link); + if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma)) + WARN_ON(i915_vma_unbind(vma)); +} + static struct i915_vma * -__i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *ggtt_view) +__i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { struct i915_vma *vma; + int i; - if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) - return ERR_PTR(-EINVAL); + GEM_BUG_ON(vm->closed); vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL); if (vma == NULL) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&vma->vm_link); - INIT_LIST_HEAD(&vma->obj_link); INIT_LIST_HEAD(&vma->exec_list); + for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) + init_request_active(&vma->last_read[i], i915_vma_retire); + init_request_active(&vma->last_fence, NULL); + list_add(&vma->vm_link, &vm->unbound_list); vma->vm = vm; vma->obj = obj; - vma->is_ggtt = i915_is_ggtt(vm); + vma->size = obj->base.size; + + if (view) { + vma->ggtt_view = *view; + if (view->type == I915_GGTT_VIEW_PARTIAL) { + vma->size = view->params.partial.size; + vma->size <<= PAGE_SHIFT; + } else if (view->type == 
I915_GGTT_VIEW_ROTATED) { + vma->size = + intel_rotation_info_size(&view->params.rotated); + vma->size <<= PAGE_SHIFT; + } + } - if (i915_is_ggtt(vm)) - vma->ggtt_view = *ggtt_view; - else + if (i915_is_ggtt(vm)) { + vma->flags |= I915_VMA_GGTT; + } else { i915_ppgtt_get(i915_vm_to_ppgtt(vm)); + } list_add_tail(&vma->obj_link, &obj->vma_list); - return vma; } +static inline bool vma_matches(struct i915_vma *vma, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + if (vma->vm != vm) + return false; + + if (!i915_vma_is_ggtt(vma)) + return true; + + if (!view) + return vma->ggtt_view.type == 0; + + if (vma->ggtt_view.type != view->type) + return false; + + return memcmp(&vma->ggtt_view.params, + &view->params, + sizeof(view->params)) == 0; +} + struct i915_vma * -i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) +{ + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view)); + + return __i915_vma_create(obj, vm, view); +} + +struct i915_vma * +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { struct i915_vma *vma; - vma = i915_gem_obj_to_vma(obj, vm); - if (!vma) - vma = __i915_gem_vma_create(obj, vm, - i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL); + list_for_each_entry_reverse(vma, &obj->vma_list, obj_link) + if (vma_matches(vma, vm, view)) + return vma; - return vma; + return NULL; } struct i915_vma * -i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); + struct i915_vma *vma; + + GEM_BUG_ON(view && !i915_is_ggtt(vm)); + vma = i915_gem_obj_to_vma(obj, vm, view); if (!vma) - vma = __i915_gem_vma_create(obj, &ggtt->base, view); + vma = __i915_vma_create(obj, vm, view); + GEM_BUG_ON(i915_vma_is_closed(vma)); return vma; - } static struct scatterlist * @@ -3434,18 +3477,16 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, } static struct sg_table * -intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, +intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { const size_t n_pages = obj->base.size / PAGE_SIZE; - unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; - unsigned int size_pages_uv; + unsigned int size = intel_rotation_info_size(rot_info); struct sgt_iter sgt_iter; dma_addr_t dma_addr; unsigned long i; dma_addr_t *page_addr_list; struct sg_table *st; - unsigned int uv_start_page; struct scatterlist *sg; int ret = -ENOMEM; @@ -3456,18 +3497,12 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, if (!page_addr_list) return ERR_PTR(ret); - /* Account for UV plane with NV12. */ - if (rot_info->pixel_format == DRM_FORMAT_NV12) - size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height; - else - size_pages_uv = 0; - /* Allocate target SG list. 
*/ st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) goto err_st_alloc; - ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL); + ret = sg_alloc_table(st, size, GFP_KERNEL); if (ret) goto err_sg_alloc; @@ -3480,32 +3515,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, st->nents = 0; sg = st->sgl; - /* Rotate the pages. */ - sg = rotate_pages(page_addr_list, 0, - rot_info->plane[0].width, rot_info->plane[0].height, - rot_info->plane[0].width, - st, sg); - - /* Append the UV plane if NV12. */ - if (rot_info->pixel_format == DRM_FORMAT_NV12) { - uv_start_page = size_pages; - - /* Check for tile-row un-alignment. */ - if (offset_in_page(rot_info->uv_offset)) - uv_start_page--; - - rot_info->uv_start_page = uv_start_page; - - sg = rotate_pages(page_addr_list, rot_info->uv_start_page, - rot_info->plane[1].width, rot_info->plane[1].height, - rot_info->plane[1].width, - st, sg); + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { + sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, + rot_info->plane[i].width, rot_info->plane[i].height, + rot_info->plane[i].stride, st, sg); } - DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n", - obj->base.size, rot_info->plane[0].width, - rot_info->plane[0].height, size_pages + size_pages_uv, - size_pages); + DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); drm_free_large(page_addr_list); @@ -3516,10 +3533,9 @@ err_sg_alloc: err_st_alloc: drm_free_large(page_addr_list); - DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n", - obj->base.size, ret, rot_info->plane[0].width, - rot_info->plane[0].height, size_pages + size_pages_uv, - size_pages); + DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); + return ERR_PTR(ret); } @@ -3569,28 +3585,27 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) { int ret = 0; - if (vma->ggtt_view.pages) + if (vma->pages) return 0; if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - vma->ggtt_view.pages = vma->obj->pages; + vma->pages = vma->obj->pages; else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) - vma->ggtt_view.pages = + vma->pages = intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL) - vma->ggtt_view.pages = - intel_partial_pages(&vma->ggtt_view, vma->obj); + vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); else WARN_ONCE(1, "GGTT view %u not implemented!\n", vma->ggtt_view.type); - if (!vma->ggtt_view.pages) { + if (!vma->pages) { DRM_ERROR("Failed to get pages for GGTT view type %u!\n", vma->ggtt_view.type); ret = -EINVAL; - } else if (IS_ERR(vma->ggtt_view.pages)) { - ret = PTR_ERR(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; + } else if (IS_ERR(vma->pages)) { + ret = PTR_ERR(vma->pages); + vma->pages = NULL; DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", vma->ggtt_view.type, ret); } @@ -3611,34 +3626,32 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - int ret; u32 bind_flags; + u32 vma_flags; + int ret; if (WARN_ON(flags == 0)) return -EINVAL; bind_flags = 0; if (flags & PIN_GLOBAL) - bind_flags |= GLOBAL_BIND; + bind_flags |= I915_VMA_GLOBAL_BIND; if (flags & PIN_USER) - bind_flags |= LOCAL_BIND; + bind_flags |= I915_VMA_LOCAL_BIND; + vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); if (flags & PIN_UPDATE) - bind_flags |= vma->bound; + bind_flags |= vma_flags; else - bind_flags &= ~vma->bound; - + bind_flags &= ~vma_flags; if (bind_flags == 0) return 0; - if (vma->bound == 0 && vma->vm->allocate_va_range) { - /* XXX: i915_vma_pin() will fix this +- hack */ - vma->pin_count++; + if (vma_flags == 0 && vma->vm->allocate_va_range) { trace_i915_va_alloc(vma); ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->node.size); - vma->pin_count--; if (ret) return ret; } @@ -3647,56 +3660,47 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, if (ret) return ret; - vma->bound |= bind_flags; - + vma->flags |= bind_flags; return 0; } -/** - * i915_ggtt_view_size - Get the size of a GGTT view. - * @obj: Object the view is of. - * @view: The view in question. - * - * @return The size of the GGTT view in bytes. - */ -size_t -i915_ggtt_view_size(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view) -{ - if (view->type == I915_GGTT_VIEW_NORMAL) { - return obj->base.size; - } else if (view->type == I915_GGTT_VIEW_ROTATED) { - return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT; - } else if (view->type == I915_GGTT_VIEW_PARTIAL) { - return view->params.partial.size << PAGE_SHIFT; - } else { - WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type); - return obj->base.size; - } -} - void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) { void __iomem *ptr; + /* Access through the GTT requires the device to be awake. 
*/ + assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + lockdep_assert_held(&vma->vm->dev->struct_mutex); - if (WARN_ON(!vma->obj->map_and_fenceable)) - return ERR_PTR(-ENODEV); + if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) + return IO_ERR_PTR(-ENODEV); - GEM_BUG_ON(!vma->is_ggtt); - GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); ptr = vma->iomap; if (ptr == NULL) { - ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, + ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable, vma->node.start, vma->node.size); if (ptr == NULL) - return ERR_PTR(-ENOMEM); + return IO_ERR_PTR(-ENOMEM); vma->iomap = ptr; } - vma->pin_count++; + __i915_vma_pin(vma); return ptr; } + +void i915_vma_unpin_and_release(struct i915_vma **p_vma) +{ + struct i915_vma *vma; + + vma = fetch_and_zero(p_vma); + if (!vma) + return; + + i915_vma_unpin(vma); + i915_vma_put(vma); +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index aa5f31d1c2ed..a9aec25535ac 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -36,7 +36,15 @@ #include <linux/io-mapping.h> +#include "i915_gem_request.h" + +#define I915_FENCE_REG_NONE -1 +#define I915_MAX_NUM_FENCES 32 +/* 32 fences + sign bit for FENCE_REG_NONE */ +#define I915_MAX_NUM_FENCE_BITS 6 + struct drm_i915_file_private; +struct drm_i915_fence_reg; typedef uint32_t gen6_pte_t; typedef uint64_t gen8_pte_t; @@ -137,12 +145,9 @@ enum i915_ggtt_view_type { }; struct intel_rotation_info { - unsigned int uv_offset; - uint32_t pixel_format; - unsigned int uv_start_page; struct { /* tiles */ - unsigned int width, height; + unsigned int width, height, stride, offset; } plane[2]; }; @@ -156,8 +161,6 @@ struct i915_ggtt_view { } partial; struct intel_rotation_info rotated; } params; - - struct sg_table *pages; }; extern const struct i915_ggtt_view i915_ggtt_view_normal; @@ -177,13 +180,38 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + struct drm_i915_fence_reg *fence; + struct sg_table *pages; void __iomem *iomap; + u64 size; + u64 display_alignment; + + unsigned int flags; + /** + * How many users have pinned this object in GTT space. The following + * users can each hold at most one reference: pwrite/pread, execbuffer + * (objects are not allowed multiple times for the same batchbuffer), + * and the framebuffer code. When switching/pageflipping, the + * framebuffer code has at most two buffers pinned per crtc. + * + * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 + * bits with absolutely no headroom. So use 4 bits. + */ +#define I915_VMA_PIN_MASK 0xf +#define I915_VMA_PIN_OVERFLOW BIT(5) /** Flags and address space this VMA is bound to */ -#define GLOBAL_BIND (1<<0) -#define LOCAL_BIND (1<<1) - unsigned int bound : 4; - bool is_ggtt : 1; +#define I915_VMA_GLOBAL_BIND BIT(6) +#define I915_VMA_LOCAL_BIND BIT(7) +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) + +#define I915_VMA_GGTT BIT(8) +#define I915_VMA_CAN_FENCE BIT(9) +#define I915_VMA_CLOSED BIT(10) + + unsigned int active; + struct i915_gem_active last_read[I915_NUM_ENGINES]; + struct i915_gem_active last_fence; /** * Support different GGTT views into the same object. 
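i915_vma_unpin_and_release() above uses the driver's fetch_and_zero() helper so the caller's pointer slot is cleared before the unpin/put sequence runs. Below is a hedged stand-in for that pattern: the fetch_and_zero macro here is a simplified imitation of the i915 helper, and struct vma is reduced to a single field purely for illustration.

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

/* Simplified stand-in for i915's fetch_and_zero(): read the slot,
 * clear it, and hand the old value back. */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) tmp__ = *(ptr);	\
	*(ptr) = 0;				\
	tmp__;					\
})

struct vma {
	int pinned;
};

/* Mirrors the shape of i915_vma_unpin_and_release(): steal the pointer
 * first so the caller never observes a half-released vma. */
static void unpin_and_release(struct vma **p_vma)
{
	struct vma *vma = fetch_and_zero(p_vma);

	if (!vma)
		return;

	vma->pinned--;	/* stands in for i915_vma_unpin() */
	free(vma);	/* stands in for i915_vma_put()   */
}

int main(void)
{
	struct vma *vma = calloc(1, sizeof(*vma));

	vma->pinned = 1;
	unpin_and_release(&vma);
	assert(vma == NULL);	/* caller's slot is cleared */
	return 0;
}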
@@ -208,20 +236,66 @@ struct i915_vma { struct hlist_node exec_node; unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; - - /** - * How many users have pinned this object in GTT space. The following - * users can each hold at most one reference: pwrite/pread, execbuffer - * (objects are not allowed multiple times for the same batchbuffer), - * and the framebuffer code. When switching/pageflipping, the - * framebuffer code has at most two buffers pinned per crtc. - * - * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 - * bits with absolutely no headroom. So use 4 bits. */ - unsigned int pin_count:4; -#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf }; +struct i915_vma * +i915_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *view); +void i915_vma_unpin_and_release(struct i915_vma **p_vma); + +static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_GGTT; +} + +static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_CAN_FENCE; +} + +static inline bool i915_vma_is_closed(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_CLOSED; +} + +static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) +{ + return vma->active; +} + +static inline bool i915_vma_is_active(const struct i915_vma *vma) +{ + return i915_vma_get_active(vma); +} + +static inline void i915_vma_set_active(struct i915_vma *vma, + unsigned int engine) +{ + vma->active |= BIT(engine); +} + +static inline void i915_vma_clear_active(struct i915_vma *vma, + unsigned int engine) +{ + vma->active &= ~BIT(engine); +} + +static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, + unsigned int engine) +{ + return vma->active & BIT(engine); +} + +static inline u32 i915_ggtt_offset(const struct i915_vma *vma) +{ + GEM_BUG_ON(!i915_vma_is_ggtt(vma)); + GEM_BUG_ON(!vma->node.allocated); + GEM_BUG_ON(upper_32_bits(vma->node.start)); + GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); + return lower_32_bits(vma->node.start); +} + struct i915_page_dma { struct page *page; union { @@ -272,11 +346,20 @@ struct i915_pml4 { struct i915_address_space { struct drm_mm mm; struct drm_device *dev; + /* Every address space belongs to a struct file - except for the global + * GTT that is owned by the driver (and so @file is set to NULL). In + * principle, no information should leak from one context to another + * (or between files/processes etc) unless explicitly shared by the + * owner. Tracking the owner is important in order to free up per-file + * objects along with the file, to aide resource tracking, and to + * assign blame. + */ + struct drm_i915_file_private *file; struct list_head global_link; u64 start; /* Start offset always 0 for dri2 */ u64 total; /* size addr space maps (ex. 2GB for ggtt) */ - bool is_ggtt; + bool closed; struct i915_page_scratch *scratch_page; struct i915_page_table *scratch_pt; @@ -306,6 +389,13 @@ struct i915_address_space { */ struct list_head inactive_list; + /** + * List of vma that have been unbound. + * + * A reference is not held on the buffer while on this list. 
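The last_read[] array and the active mask introduced above track which engines still reference a vma: i915_vma_set_active()/i915_vma_clear_active() flip one bit per engine, and i915_vma_retire() earlier in this patch only moves the vma to the inactive list once the mask drains. A self-contained sketch of that bookkeeping; the engine count and the two-engine workload are invented for illustration.

#include <assert.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define NUM_ENGINES	4	/* example value, not the driver's */

struct vma {
	unsigned int active;	/* one bit per engine, as in i915_vma */
};

static void vma_set_active(struct vma *vma, unsigned int engine)
{
	vma->active |= BIT(engine);
}

static void vma_clear_active(struct vma *vma, unsigned int engine)
{
	vma->active &= ~BIT(engine);
}

static int vma_is_active(const struct vma *vma)
{
	return vma->active != 0;
}

int main(void)
{
	struct vma vma = { 0 };

	/* Requests on two engines reference the same vma. */
	vma_set_active(&vma, 0);
	vma_set_active(&vma, 2);

	/* Retiring one engine's request is not enough to idle it... */
	vma_clear_active(&vma, 0);
	assert(vma_is_active(&vma));

	/* ...only when the last engine retires may it be unbound. */
	vma_clear_active(&vma, 2);
	assert(!vma_is_active(&vma));

	printf("vma idle: %d\n", !vma_is_active(&vma));
	return 0;
}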
+ */ + struct list_head unbound_list; + /* FIXME: Need a more generic return type */ gen6_pte_t (*pte_encode)(dma_addr_t addr, enum i915_cache_level level, @@ -338,7 +428,7 @@ struct i915_address_space { u32 flags); }; -#define i915_is_ggtt(V) ((V)->is_ggtt) +#define i915_is_ggtt(V) (!(V)->file) /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. In addition to the normal @@ -349,14 +439,13 @@ struct i915_address_space { */ struct i915_ggtt { struct i915_address_space base; + struct io_mapping mappable; /* Mapping to our CPU mappable region */ size_t stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ size_t stolen_reserved_base; size_t stolen_reserved_size; - size_t size; /* Total size of Global GTT */ u64 mappable_end; /* End offset that we can CPU map */ - struct io_mapping *mappable; /* Mapping to our CPU mappable region */ phys_addr_t mappable_base; /* PA of our GMADR */ /** "Graphics Stolen Memory" holds the global PTEs */ @@ -365,8 +454,6 @@ struct i915_ggtt { bool do_idle_maps; int mtrr; - - int (*probe)(struct i915_ggtt *ggtt); }; struct i915_hw_ppgtt { @@ -380,8 +467,6 @@ struct i915_hw_ppgtt { struct i915_page_directory pd; /* GEN6-7 */ }; - struct drm_i915_file_private *file_priv; - gen6_pte_t __iomem *pd_addr; int (*enable)(struct i915_hw_ppgtt *ppgtt); @@ -521,14 +606,15 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) px_dma(ppgtt->base.scratch_pd); } -int i915_ggtt_init_hw(struct drm_device *dev); -int i915_ggtt_enable_hw(struct drm_device *dev); -void i915_gem_init_ggtt(struct drm_device *dev); -void i915_ggtt_cleanup_hw(struct drm_device *dev); +int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); +int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); +int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); +int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); +void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_device *dev); void i915_ppgtt_release(struct kref *kref); -struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, +struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, struct drm_i915_file_private *fpriv); static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) { @@ -548,23 +634,67 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); -static inline bool -i915_ggtt_view_equal(const struct i915_ggtt_view *a, - const struct i915_ggtt_view *b) +/* Flags used by pin/bind&friends. 
*/ +#define PIN_NONBLOCK BIT(0) +#define PIN_MAPPABLE BIT(1) +#define PIN_ZONE_4G BIT(2) +#define PIN_NONFAULT BIT(3) + +#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ +#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ +#define PIN_UPDATE BIT(8) + +#define PIN_HIGH BIT(9) +#define PIN_OFFSET_BIAS BIT(10) +#define PIN_OFFSET_FIXED BIT(11) +#define PIN_OFFSET_MASK (~4095) + +int __i915_vma_do_pin(struct i915_vma *vma, + u64 size, u64 alignment, u64 flags); +static inline int __must_check +i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - if (WARN_ON(!a || !b)) - return false; - - if (a->type != b->type) - return false; - if (a->type != I915_GGTT_VIEW_NORMAL) - return !memcmp(&a->params, &b->params, sizeof(a->params)); - return true; + BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW); + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); + BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); + + /* Pin early to prevent the shrinker/eviction logic from destroying + * our vma as we insert and bind. + */ + if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) + return 0; + + return __i915_vma_do_pin(vma, size, alignment, flags); +} + +static inline int i915_vma_pin_count(const struct i915_vma *vma) +{ + return vma->flags & I915_VMA_PIN_MASK; +} + +static inline bool i915_vma_is_pinned(const struct i915_vma *vma) +{ + return i915_vma_pin_count(vma); +} + +static inline void __i915_vma_pin(struct i915_vma *vma) +{ + vma->flags++; + GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW); +} + +static inline void __i915_vma_unpin(struct i915_vma *vma) +{ + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + vma->flags--; } -size_t -i915_ggtt_view_size(struct drm_i915_gem_object *obj, - const struct i915_ggtt_view *view); +static inline void i915_vma_unpin(struct i915_vma *vma) +{ + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + __i915_vma_unpin(vma); +} /** * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture @@ -580,6 +710,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, * Returns a valid iomapped pointer or ERR_PTR. 
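i915_vma_pin() above packs the pin count and the bind bits into one flags word: the pre-increment bumps the count, and the XOR-and-mask test only falls through to __i915_vma_do_pin() when a requested binding (or the overflow bit) is missing. A userspace walk-through of that fast path, reusing the bit layout from this header; PIN_GLOBAL is the only pin flag exercised here.

#include <assert.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))

/* Bit layout from i915_gem_gtt.h above. */
#define I915_VMA_PIN_MASK	0xf
#define I915_VMA_PIN_OVERFLOW	BIT(5)
#define I915_VMA_GLOBAL_BIND	BIT(6)
#define I915_VMA_LOCAL_BIND	BIT(7)
#define I915_VMA_BIND_MASK \
	(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)

#define PIN_GLOBAL		BIT(6)	/* must equal I915_VMA_GLOBAL_BIND */

/* Returns 1 when the fast path applies (vma already bound as requested),
 * 0 when the slow path (__i915_vma_do_pin) would have to bind it. */
static int vma_pin_fast_path(unsigned int *flags, unsigned int pin_flags)
{
	return ((++*flags ^ pin_flags) & I915_VMA_BIND_MASK) == 0;
}

int main(void)
{
	unsigned int flags = 0;	/* unpinned, unbound vma */

	/* First PIN_GLOBAL: the count becomes 1, but the GLOBAL bind bit is
	 * missing, so the XOR leaves it set and the slow path binds it. */
	assert(!vma_pin_fast_path(&flags, PIN_GLOBAL));
	flags |= I915_VMA_GLOBAL_BIND;	/* slow path bound the vma */

	/* Second PIN_GLOBAL: the bind bit cancels in the XOR, so only the
	 * pin count changes and no rebinding is needed. */
	assert(vma_pin_fast_path(&flags, PIN_GLOBAL));
	assert((flags & I915_VMA_PIN_MASK) == 2);

	printf("pin count %u, flags %#x\n", flags & I915_VMA_PIN_MASK, flags);
	return 0;
}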
*/ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); +#define IO_ERR_PTR(x) ((void __iomem *)ERR_PTR(x)) /** * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_iomap @@ -593,9 +724,14 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); static inline void i915_vma_unpin_iomap(struct i915_vma *vma) { lockdep_assert_held(&vma->vm->dev->struct_mutex); - GEM_BUG_ON(vma->pin_count == 0); GEM_BUG_ON(vma->iomap == NULL); - vma->pin_count--; + i915_vma_unpin(vma); +} + +static inline struct page *i915_vma_first_page(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + return sg_page(vma->pages->sgl); } #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index f75bbd67a13a..95b7e9afd5f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -28,10 +28,17 @@ #include "i915_drv.h" #include "intel_renderstate.h" +struct render_state { + const struct intel_renderstate_rodata *rodata; + struct i915_vma *vma; + u32 aux_batch_size; + u32 aux_batch_offset; +}; + static const struct intel_renderstate_rodata * -render_state_get_rodata(const int gen) +render_state_get_rodata(const struct drm_i915_gem_request *req) { - switch (gen) { + switch (INTEL_GEN(req->i915)) { case 6: return &gen6_null_state; case 7: @@ -45,35 +52,6 @@ render_state_get_rodata(const int gen) return NULL; } -static int render_state_init(struct render_state *so, - struct drm_i915_private *dev_priv) -{ - int ret; - - so->gen = INTEL_GEN(dev_priv); - so->rodata = render_state_get_rodata(so->gen); - if (so->rodata == NULL) - return 0; - - if (so->rodata->batch_items * 4 > 4096) - return -EINVAL; - - so->obj = i915_gem_object_create(&dev_priv->drm, 4096); - if (IS_ERR(so->obj)) - return PTR_ERR(so->obj); - - ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); - if (ret) - goto free_gem; - - so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj); - return 0; - -free_gem: - drm_gem_object_unreference(&so->obj->base); - return ret; -} - /* * Macro to add commands to auxiliary batch. 
* This macro only checks for page overflow before inserting the commands, @@ -94,27 +72,28 @@ free_gem: static int render_state_setup(struct render_state *so) { - struct drm_device *dev = so->obj->base.dev; + struct drm_device *dev = so->vma->vm->dev; const struct intel_renderstate_rodata *rodata = so->rodata; + const bool has_64bit_reloc = INTEL_GEN(dev) >= 8; unsigned int i = 0, reloc_index = 0; struct page *page; u32 *d; int ret; - ret = i915_gem_object_set_to_cpu_domain(so->obj, true); + ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true); if (ret) return ret; - page = i915_gem_object_get_dirty_page(so->obj, 0); + page = i915_gem_object_get_dirty_page(so->vma->obj, 0); d = kmap(page); while (i < rodata->batch_items) { u32 s = rodata->batch[i]; if (i * 4 == rodata->reloc[reloc_index]) { - u64 r = s + so->ggtt_offset; + u64 r = s + so->vma->node.start; s = lower_32_bits(r); - if (so->gen >= 8) { + if (has_64bit_reloc) { if (i + 1 >= rodata->batch_items || rodata->batch[i + 1] != 0) { ret = -EINVAL; @@ -174,7 +153,7 @@ static int render_state_setup(struct render_state *so) kunmap(page); - ret = i915_gem_object_set_to_gtt_domain(so->obj, false); + ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false); if (ret) return ret; @@ -192,67 +171,60 @@ err_out: #undef OUT_BATCH -void i915_gem_render_state_fini(struct render_state *so) -{ - i915_gem_object_ggtt_unpin(so->obj); - drm_gem_object_unreference(&so->obj->base); -} - -int i915_gem_render_state_prepare(struct intel_engine_cs *engine, - struct render_state *so) +int i915_gem_render_state_init(struct drm_i915_gem_request *req) { + struct render_state so; + struct drm_i915_gem_object *obj; int ret; - if (WARN_ON(engine->id != RCS)) + if (WARN_ON(req->engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, engine->i915); - if (ret) - return ret; - - if (so->rodata == NULL) + so.rodata = render_state_get_rodata(req); + if (!so.rodata) return 0; - ret = render_state_setup(so); - if (ret) { - i915_gem_render_state_fini(so); - return ret; - } + if (so.rodata->batch_items * 4 > 4096) + return -EINVAL; - return 0; -} + obj = i915_gem_object_create(&req->i915->drm, 4096); + if (IS_ERR(obj)) + return PTR_ERR(obj); -int i915_gem_render_state_init(struct drm_i915_gem_request *req) -{ - struct render_state so; - int ret; + so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL); + if (IS_ERR(so.vma)) { + ret = PTR_ERR(so.vma); + goto err_obj; + } - ret = i915_gem_render_state_prepare(req->engine, &so); + ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL); if (ret) - return ret; + goto err_obj; - if (so.rodata == NULL) - return 0; + ret = render_state_setup(&so); + if (ret) + goto err_unpin; - ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset, - so.rodata->batch_items * 4, - I915_DISPATCH_SECURE); + ret = req->engine->emit_bb_start(req, so.vma->node.start, + so.rodata->batch_items * 4, + I915_DISPATCH_SECURE); if (ret) - goto out; + goto err_unpin; if (so.aux_batch_size > 8) { - ret = req->engine->dispatch_execbuffer(req, - (so.ggtt_offset + - so.aux_batch_offset), - so.aux_batch_size, - I915_DISPATCH_SECURE); + ret = req->engine->emit_bb_start(req, + (so.vma->node.start + + so.aux_batch_offset), + so.aux_batch_size, + I915_DISPATCH_SECURE); if (ret) - goto out; + goto err_unpin; } - i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req); - -out: - i915_gem_render_state_fini(&so); + i915_vma_move_to_active(so.vma, req, 0); +err_unpin: + i915_vma_unpin(so.vma); +err_obj: + i915_gem_object_put(obj); return ret; } diff --git 
a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h index 6aaa3a10a630..18cce3f06e9c 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ b/drivers/gpu/drm/i915/i915_gem_render_state.h @@ -24,26 +24,8 @@ #ifndef _I915_GEM_RENDER_STATE_H_ #define _I915_GEM_RENDER_STATE_H_ -#include <linux/types.h> - -struct intel_renderstate_rodata { - const u32 *reloc; - const u32 *batch; - const u32 batch_items; -}; - -struct render_state { - const struct intel_renderstate_rodata *rodata; - struct drm_i915_gem_object *obj; - u64 ggtt_offset; - int gen; - u32 aux_batch_size; - u32 aux_batch_offset; -}; +struct drm_i915_gem_request; int i915_gem_render_state_init(struct drm_i915_gem_request *req); -void i915_gem_render_state_fini(struct render_state *so); -int i915_gem_render_state_prepare(struct intel_engine_cs *engine, - struct render_state *so); #endif /* _I915_GEM_RENDER_STATE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c new file mode 100644 index 000000000000..1a215320cefb --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -0,0 +1,785 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include <linux/prefetch.h> + +#include "i915_drv.h" + +static const char *i915_fence_get_driver_name(struct fence *fence) +{ + return "i915"; +} + +static const char *i915_fence_get_timeline_name(struct fence *fence) +{ + /* Timelines are bound by eviction to a VM. However, since + * we only have a global seqno at the moment, we only have + * a single timeline. Note that each timeline will have + * multiple execution contexts (fence contexts) as we allow + * engines within a single timeline to execute in parallel. 
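render_state_setup() earlier in this patch derives has_64bit_reloc from the device generation and, on gen8+, writes a relocated GGTT address as two dwords via lower_32_bits()/upper_32_bits(). A tiny standalone version of that split; the batch contents and the GGTT address below are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n)	((uint32_t)(n))
#define upper_32_bits(n)	((uint32_t)((n) >> 32))

int main(void)
{
	/* Invented example: a reloc slot holding offset 0x1000 within the
	 * object, bound at a GGTT address above 4 GiB. */
	uint32_t batch[2] = { 0x1000, 0 };
	uint64_t node_start = 0x1ffff0000ull;	/* so->vma->node.start */
	uint64_t r = batch[0] + node_start;
	int has_64bit_reloc = 1;		/* INTEL_GEN >= 8 */

	batch[0] = lower_32_bits(r);
	if (has_64bit_reloc)
		batch[1] = upper_32_bits(r);	/* next dword must be 0 first */

	assert((((uint64_t)batch[1] << 32) | batch[0]) == r);
	printf("reloc -> %#x %#x\n",
	       (unsigned int)batch[0], (unsigned int)batch[1]);
	return 0;
}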
+ */ + return "global"; +} + +static bool i915_fence_signaled(struct fence *fence) +{ + return i915_gem_request_completed(to_request(fence)); +} + +static bool i915_fence_enable_signaling(struct fence *fence) +{ + if (i915_fence_signaled(fence)) + return false; + + intel_engine_enable_signaling(to_request(fence)); + return true; +} + +static signed long i915_fence_wait(struct fence *fence, + bool interruptible, + signed long timeout_jiffies) +{ + s64 timeout_ns, *timeout; + int ret; + + if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { + timeout_ns = jiffies_to_nsecs(timeout_jiffies); + timeout = &timeout_ns; + } else { + timeout = NULL; + } + + ret = i915_wait_request(to_request(fence), + interruptible, timeout, + NO_WAITBOOST); + if (ret == -ETIME) + return 0; + + if (ret < 0) + return ret; + + if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) + timeout_jiffies = nsecs_to_jiffies(timeout_ns); + + return timeout_jiffies; +} + +static void i915_fence_value_str(struct fence *fence, char *str, int size) +{ + snprintf(str, size, "%u", fence->seqno); +} + +static void i915_fence_timeline_value_str(struct fence *fence, char *str, + int size) +{ + snprintf(str, size, "%u", + intel_engine_get_seqno(to_request(fence)->engine)); +} + +static void i915_fence_release(struct fence *fence) +{ + struct drm_i915_gem_request *req = to_request(fence); + + kmem_cache_free(req->i915->requests, req); +} + +const struct fence_ops i915_fence_ops = { + .get_driver_name = i915_fence_get_driver_name, + .get_timeline_name = i915_fence_get_timeline_name, + .enable_signaling = i915_fence_enable_signaling, + .signaled = i915_fence_signaled, + .wait = i915_fence_wait, + .release = i915_fence_release, + .fence_value_str = i915_fence_value_str, + .timeline_value_str = i915_fence_timeline_value_str, +}; + +int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, + struct drm_file *file) +{ + struct drm_i915_private *dev_private; + struct drm_i915_file_private *file_priv; + + WARN_ON(!req || !file || req->file_priv); + + if (!req || !file) + return -EINVAL; + + if (req->file_priv) + return -EINVAL; + + dev_private = req->i915; + file_priv = file->driver_priv; + + spin_lock(&file_priv->mm.lock); + req->file_priv = file_priv; + list_add_tail(&req->client_list, &file_priv->mm.request_list); + spin_unlock(&file_priv->mm.lock); + + return 0; +} + +static inline void +i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) +{ + struct drm_i915_file_private *file_priv = request->file_priv; + + if (!file_priv) + return; + + spin_lock(&file_priv->mm.lock); + list_del(&request->client_list); + request->file_priv = NULL; + spin_unlock(&file_priv->mm.lock); +} + +void i915_gem_retire_noop(struct i915_gem_active *active, + struct drm_i915_gem_request *request) +{ + /* Space left intentionally blank */ +} + +static void i915_gem_request_retire(struct drm_i915_gem_request *request) +{ + struct i915_gem_active *active, *next; + + trace_i915_gem_request_retire(request); + list_del(&request->link); + + /* We know the GPU must have read the request to have + * sent us the seqno + interrupt, so use the position + * of tail of the request to update the last known position + * of the GPU head. + * + * Note this requires that we are always called in request + * completion order. + */ + list_del(&request->ring_link); + request->ring->last_retired_head = request->postfix; + + /* Walk through the active list, calling retire on each. 
This allows + * objects to track their GPU activity and mark themselves as idle + * when their *last* active request is completed (updating state + * tracking lists for eviction, active references for GEM, etc). + * + * As the ->retire() may free the node, we decouple it first and + * pass along the auxiliary information (to avoid dereferencing + * the node after the callback). + */ + list_for_each_entry_safe(active, next, &request->active_list, link) { + /* In microbenchmarks or focusing upon time inside the kernel, + * we may spend an inordinate amount of time simply handling + * the retirement of requests and processing their callbacks. + * Of which, this loop itself is particularly hot due to the + * cache misses when jumping around the list of i915_gem_active. + * So we try to keep this loop as streamlined as possible and + * also prefetch the next i915_gem_active to try and hide + * the likely cache miss. + */ + prefetchw(next); + + INIT_LIST_HEAD(&active->link); + RCU_INIT_POINTER(active->request, NULL); + + active->retire(active, request); + } + + i915_gem_request_remove_from_client(request); + + if (request->previous_context) { + if (i915.enable_execlists) + intel_lr_context_unpin(request->previous_context, + request->engine); + } + + i915_gem_context_put(request->ctx); + i915_gem_request_put(request); +} + +void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *engine = req->engine; + struct drm_i915_gem_request *tmp; + + lockdep_assert_held(&req->i915->drm.struct_mutex); + GEM_BUG_ON(list_empty(&req->link)); + + do { + tmp = list_first_entry(&engine->request_list, + typeof(*tmp), link); + + i915_gem_request_retire(tmp); + } while (tmp != req); +} + +static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible) +{ + if (__i915_terminally_wedged(reset_counter)) + return -EIO; + + if (__i915_reset_in_progress(reset_counter)) { + /* Non-interruptible callers can't handle -EAGAIN, hence return + * -EIO unconditionally for these. 
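The retirement loop above detaches each i915_gem_active node and clears its request pointer before invoking ->retire(), because the callback is allowed to free the node; it also prefetches the next entry since the loop is hot. A reduced sketch of that detach-before-callback discipline, using minimal stand-in list helpers rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel list and i915_gem_active. */
struct node {
	struct node *prev, *next;
	void (*retire)(struct node *node);
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* A retire callback is allowed to free its node... */
static void retire_and_free(struct node *n)
{
	printf("retiring %p\n", (void *)n);
	free(n);
}

int main(void)
{
	struct node head, *n, *next;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++) {
		n = calloc(1, sizeof(*n));
		n->retire = retire_and_free;
		list_add_tail(n, &head);
	}

	/* ...so, as in i915_gem_request_retire(), remember the successor
	 * and detach the node before calling back into it. */
	for (n = head.next; n != &head; n = next) {
		next = n->next;
		list_del_init(n);
		n->retire(n);
	}
	return 0;
}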
+ */ + if (!interruptible) + return -EIO; + + return -EAGAIN; + } + + return 0; +} + +static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) +{ + struct intel_engine_cs *engine; + int ret; + + /* Carefully retire all requests without writing to the rings */ + for_each_engine(engine, dev_priv) { + ret = intel_engine_idle(engine, true); + if (ret) + return ret; + } + i915_gem_retire_requests(dev_priv); + + /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ + if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) { + while (intel_kick_waiters(dev_priv) || + intel_kick_signalers(dev_priv)) + yield(); + } + + /* Finally reset hw state */ + for_each_engine(engine, dev_priv) + intel_engine_init_seqno(engine, seqno); + + return 0; +} + +int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int ret; + + if (seqno == 0) + return -EINVAL; + + /* HWS page needs to be set less than what we + * will inject to ring + */ + ret = i915_gem_init_seqno(dev_priv, seqno - 1); + if (ret) + return ret; + + dev_priv->next_seqno = seqno; + return 0; +} + +static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) +{ + /* reserve 0 for non-seqno */ + if (unlikely(dev_priv->next_seqno == 0)) { + int ret; + + ret = i915_gem_init_seqno(dev_priv, 0); + if (ret) + return ret; + + dev_priv->next_seqno = 1; + } + + *seqno = dev_priv->next_seqno++; + return 0; +} + +/** + * i915_gem_request_alloc - allocate a request structure + * + * @engine: engine that we wish to issue the request on. + * @ctx: context that the request will be associated with. + * This can be NULL if the request is not directly related to + * any specific user context, in which case this function will + * choose an appropriate context to use. + * + * Returns a pointer to the allocated request if successful, + * or an error code if not. + */ +struct drm_i915_gem_request * +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) +{ + struct drm_i915_private *dev_priv = engine->i915; + unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error); + struct drm_i915_gem_request *req; + u32 seqno; + int ret; + + /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report + * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex + * and restart. + */ + ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible); + if (ret) + return ERR_PTR(ret); + + /* Move the oldest request to the slab-cache (if not in use!) */ + req = list_first_entry_or_null(&engine->request_list, + typeof(*req), link); + if (req && i915_gem_request_completed(req)) + i915_gem_request_retire(req); + + /* Beware: Dragons be flying overhead. + * + * We use RCU to look up requests in flight. The lookups may + * race with the request being allocated from the slab freelist. + * That is the request we are writing to here, may be in the process + * of being read by __i915_gem_active_get_rcu(). As such, + * we have to be very careful when overwriting the contents. During + * the RCU lookup, we change chase the request->engine pointer, + * read the request->fence.seqno and increment the reference count. + * + * The reference count is incremented atomically. If it is zero, + * the lookup knows the request is unallocated and complete. Otherwise, + * it is either still in use, or has been reallocated and reset + * with fence_init(). 
This increment is safe for release as we check + * that the request we have a reference to and matches the active + * request. + * + * Before we increment the refcount, we chase the request->engine + * pointer. We must not call kmem_cache_zalloc() or else we set + * that pointer to NULL and cause a crash during the lookup. If + * we see the request is completed (based on the value of the + * old engine and seqno), the lookup is complete and reports NULL. + * If we decide the request is not completed (new engine or seqno), + * then we grab a reference and double check that it is still the + * active request - which it won't be and restart the lookup. + * + * Do not use kmem_cache_zalloc() here! + */ + req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); + if (!req) + return ERR_PTR(-ENOMEM); + + ret = i915_gem_get_seqno(dev_priv, &seqno); + if (ret) + goto err; + + spin_lock_init(&req->lock); + fence_init(&req->fence, + &i915_fence_ops, + &req->lock, + engine->fence_context, + seqno); + + INIT_LIST_HEAD(&req->active_list); + req->i915 = dev_priv; + req->engine = engine; + req->ctx = i915_gem_context_get(ctx); + + /* No zalloc, must clear what we need by hand */ + req->previous_context = NULL; + req->file_priv = NULL; + req->batch = NULL; + req->elsp_submitted = 0; + + /* + * Reserve space in the ring buffer for all the commands required to + * eventually emit this request. This is to guarantee that the + * i915_add_request() call can't fail. Note that the reserve may need + * to be redone if the request is not actually submitted straight + * away, e.g. because a GPU scheduler has deferred it. + */ + req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; + + if (i915.enable_execlists) + ret = intel_logical_ring_alloc_request_extras(req); + else + ret = intel_ring_alloc_request_extras(req); + if (ret) + goto err_ctx; + + /* Record the position of the start of the request so that + * should we detect the updated seqno part-way through the + * GPU processing the request, we never over-estimate the + * position of the head. + */ + req->head = req->ring->tail; + + return req; + +err_ctx: + i915_gem_context_put(ctx); +err: + kmem_cache_free(dev_priv->requests, req); + return ERR_PTR(ret); +} + +static void i915_gem_mark_busy(const struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + dev_priv->gt.active_engines |= intel_engine_flag(engine); + if (dev_priv->gt.awake) + return; + + intel_runtime_pm_get_noresume(dev_priv); + dev_priv->gt.awake = true; + + intel_enable_gt_powersave(dev_priv); + i915_update_gfx_val(dev_priv); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_busy(dev_priv); + + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.retire_work, + round_jiffies_up_relative(HZ)); +} + +/* + * NB: This function is not allowed to fail. Doing so would mean the the + * request is not being tracked for completion but the work itself is + * going to happen on the hardware. This would be a Bad Thing(tm). + */ +void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) +{ + struct intel_engine_cs *engine = request->engine; + struct intel_ring *ring = request->ring; + u32 request_start; + u32 reserved_tail; + int ret; + + /* + * To ensure that this call will not fail, space for its emissions + * should already have been reserved in the ring buffer. Let the ring + * know that it is time to use that space up. 
+ */ + request_start = ring->tail; + reserved_tail = request->reserved_space; + request->reserved_space = 0; + + /* + * Emit any outstanding flushes - execbuf can fail to emit the flush + * after having emitted the batchbuffer command. Hence we need to fix + * things up similar to emitting the lazy request. The difference here + * is that the flush _must_ happen before the next request, no matter + * what. + */ + if (flush_caches) { + ret = engine->emit_flush(request, EMIT_FLUSH); + + /* Not allowed to fail! */ + WARN(ret, "engine->emit_flush() failed: %d!\n", ret); + } + + trace_i915_gem_request_add(request); + + /* Seal the request and mark it as pending execution. Note that + * we may inspect this state, without holding any locks, during + * hangcheck. Hence we apply the barrier to ensure that we do not + * see a more recent value in the hws than we are tracking. + */ + request->emitted_jiffies = jiffies; + request->previous_seqno = engine->last_submitted_seqno; + engine->last_submitted_seqno = request->fence.seqno; + i915_gem_active_set(&engine->last_request, request); + list_add_tail(&request->link, &engine->request_list); + list_add_tail(&request->ring_link, &ring->request_list); + + /* Record the position of the start of the breadcrumb so that + * should we detect the updated seqno part-way through the + * GPU processing the request, we never over-estimate the + * position of the ring's HEAD. + */ + request->postfix = ring->tail; + + /* Not allowed to fail! */ + ret = engine->emit_request(request); + WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret); + + /* Sanity check that the reserved size was large enough. */ + ret = ring->tail - request_start; + if (ret < 0) + ret += ring->size; + WARN_ONCE(ret > reserved_tail, + "Not enough space reserved (%d bytes) " + "for adding the request (%d bytes)\n", + reserved_tail, ret); + + i915_gem_mark_busy(engine); + engine->submit_request(request); +} + +static unsigned long local_clock_us(unsigned int *cpu) +{ + unsigned long t; + + /* Cheaply and approximately convert from nanoseconds to microseconds. + * The result and subsequent calculations are also defined in the same + * approximate microseconds units. The principal source of timing + * error here is from the simple truncation. + * + * Note that local_clock() is only defined wrt to the current CPU; + * the comparisons are no longer valid if we switch CPUs. Instead of + * blocking preemption for the entire busywait, we can detect the CPU + * switch and use that as indicator of system load and a reason to + * stop busywaiting, see busywait_stop(). + */ + *cpu = get_cpu(); + t = local_clock() >> 10; + put_cpu(); + + return t; +} + +static bool busywait_stop(unsigned long timeout, unsigned int cpu) +{ + unsigned int this_cpu; + + if (time_after(local_clock_us(&this_cpu), timeout)) + return true; + + return this_cpu != cpu; +} + +bool __i915_spin_request(const struct drm_i915_gem_request *req, + int state, unsigned long timeout_us) +{ + unsigned int cpu; + + /* When waiting for high frequency requests, e.g. during synchronous + * rendering split between the CPU and GPU, the finite amount of time + * required to set up the irq and wait upon it limits the response + * rate. By busywaiting on the request completion for a short while we + * can service the high frequency waits as quick as possible. However, + * if it is a slow request, we want to sleep as quickly as possible. 
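__i915_add_request() above validates the reservation by measuring what was actually emitted: ring->tail - request_start, with ring->size added back when the tail wrapped past the start. The same arithmetic in isolation, with made-up offsets:

#include <assert.h>
#include <stdio.h>

/* Bytes emitted between request_start and tail on a circular ring,
 * mirroring the check at the end of __i915_add_request(). */
static int ring_bytes_used(int start, int tail, int size)
{
	int used = tail - start;

	if (used < 0)
		used += size;
	return used;
}

int main(void)
{
	const int size = 4096;	/* example ring size */

	assert(ring_bytes_used(100, 356, size) == 256);		/* no wrap  */
	assert(ring_bytes_used(4000, 160, size) == 256);	/* wrapped  */
	printf("wrap-aware ring accounting ok\n");
	return 0;
}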
+ * The tradeoff between waiting and sleeping is roughly the time it + * takes to sleep on a request, on the order of a microsecond. + */ + + timeout_us += local_clock_us(&cpu); + do { + if (i915_gem_request_completed(req)) + return true; + + if (signal_pending_state(state, current)) + break; + + if (busywait_stop(timeout_us, cpu)) + break; + + cpu_relax_lowlatency(); + } while (!need_resched()); + + return false; +} + +/** + * i915_wait_request - wait until execution of request has finished + * @req: duh! + * @interruptible: do an interruptible wait (normally yes) + * @timeout: in - how long to wait (NULL forever); out - how much time remaining + * @rps: client to charge for RPS boosting + * + * Note: It is of utmost importance that the passed in seqno and reset_counter + * values have been read by the caller in an smp safe manner. Where read-side + * locks are involved, it is sufficient to read the reset_counter before + * unlocking the lock that protects the seqno. For lockless tricks, the + * reset_counter _must_ be read before, and an appropriate smp_rmb must be + * inserted. + * + * Returns 0 if the request was found within the alloted time. Else returns the + * errno with remaining time filled in timeout argument. + */ +int i915_wait_request(struct drm_i915_gem_request *req, + bool interruptible, + s64 *timeout, + struct intel_rps_client *rps) +{ + int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; + DEFINE_WAIT(reset); + struct intel_wait wait; + unsigned long timeout_remain; + int ret = 0; + + might_sleep(); + + if (i915_gem_request_completed(req)) + return 0; + + timeout_remain = MAX_SCHEDULE_TIMEOUT; + if (timeout) { + if (WARN_ON(*timeout < 0)) + return -EINVAL; + + if (*timeout == 0) + return -ETIME; + + /* Record current time in case interrupted, or wedged */ + timeout_remain = nsecs_to_jiffies_timeout(*timeout); + *timeout += ktime_get_raw_ns(); + } + + trace_i915_gem_request_wait_begin(req); + + /* This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. + * We can circumvent that by promoting the GPU frequency to maximum + * before we wait. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). Not all clients even want their results + * immediately and for them we should just let the GPU select its own + * frequency to maximise efficiency. To prevent a single client from + * forcing the clocks too high for the whole system, we only allow + * each client to waitboost once in a busy period. + */ + if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6) + gen6_rps_boost(req->i915, rps, req->emitted_jiffies); + + /* Optimistic short spin before touching IRQs */ + if (i915_spin_request(req, state, 5)) + goto complete; + + set_current_state(state); + add_wait_queue(&req->i915->gpu_error.wait_queue, &reset); + + intel_wait_init(&wait, req->fence.seqno); + if (intel_engine_add_wait(req->engine, &wait)) + /* In order to check that we haven't missed the interrupt + * as we enabled it, we need to kick ourselves to do a + * coherent check on the seqno before we sleep. 
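__i915_spin_request() above bounds its optimistic spin with local_clock_us() and busywait_stop(): the deadline comes from the local CPU's clock, and the spin also aborts if the task migrates, since timestamps taken on different CPUs are not comparable. A rough userspace approximation of that loop, assuming CLOCK_MONOTONIC and sched_getcpu() as stand-ins and a dummy completion check:

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Userspace stand-in for local_clock_us(): a coarse microsecond clock
 * plus the CPU it was sampled on. */
static unsigned long clock_us(int *cpu)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	*cpu = sched_getcpu();
	return ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

static bool busywait_stop(unsigned long timeout, int cpu)
{
	int this_cpu;

	if (clock_us(&this_cpu) > timeout)
		return true;
	/* Timestamps from another CPU are not comparable: give up. */
	return this_cpu != cpu;
}

/* Dummy stand-in for i915_gem_request_completed(). */
static bool request_completed(void)
{
	return false;
}

int main(void)
{
	int cpu;
	unsigned long timeout = clock_us(&cpu) + 5;	/* ~5us budget */
	bool hit = false;

	do {
		if (request_completed()) {
			hit = true;
			break;
		}
		if (busywait_stop(timeout, cpu))
			break;
	} while (1);

	printf("spin %s\n", hit ? "succeeded" : "timed out");
	return 0;
}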
+ */ + goto wakeup; + + for (;;) { + if (signal_pending_state(state, current)) { + ret = -ERESTARTSYS; + break; + } + + timeout_remain = io_schedule_timeout(timeout_remain); + if (timeout_remain == 0) { + ret = -ETIME; + break; + } + + if (intel_wait_complete(&wait)) + break; + + set_current_state(state); + +wakeup: + /* Carefully check if the request is complete, giving time + * for the seqno to be visible following the interrupt. + * We also have to check in case we are kicked by the GPU + * reset in order to drop the struct_mutex. + */ + if (__i915_request_irq_complete(req)) + break; + + /* Only spin if we know the GPU is processing this request */ + if (i915_spin_request(req, state, 2)) + break; + } + remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset); + + intel_engine_remove_wait(req->engine, &wait); + __set_current_state(TASK_RUNNING); +complete: + trace_i915_gem_request_wait_end(req); + + if (timeout) { + *timeout -= ktime_get_raw_ns(); + if (*timeout < 0) + *timeout = 0; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regrssion from the timespec->ktime conversion. + */ + if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000) + *timeout = 0; + } + + if (IS_RPS_USER(rps) && + req->fence.seqno == req->engine->last_submitted_seqno) { + /* The GPU is now idle and this client has stalled. + * Since no other client has submitted a request in the + * meantime, assume that this client is the only one + * supplying work to the GPU but is unable to keep that + * work supplied because it is waiting. Since the GPU is + * then never kept fully busy, RPS autoclocking will + * keep the clocks relatively low, causing further delays. + * Compensate by giving the synchronous client credit for + * a waitboost next time. 
+ */ + spin_lock(&req->i915->rps.client_lock); + list_del_init(&rps->link); + spin_unlock(&req->i915->rps.client_lock); + } + + return ret; +} + +static void engine_retire_requests(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request, *next; + + list_for_each_entry_safe(request, next, &engine->request_list, link) { + if (!i915_gem_request_completed(request)) + break; + + i915_gem_request_retire(request); + } +} + +void i915_gem_retire_requests(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (dev_priv->gt.active_engines == 0) + return; + + GEM_BUG_ON(!dev_priv->gt.awake); + + for_each_engine(engine, dev_priv) { + engine_retire_requests(engine); + if (!intel_engine_is_active(engine)) + dev_priv->gt.active_engines &= ~intel_engine_flag(engine); + } + + if (dev_priv->gt.active_engines == 0) + queue_delayed_work(dev_priv->wq, + &dev_priv->gt.idle_work, + msecs_to_jiffies(100)); +} diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h new file mode 100644 index 000000000000..6c72bd8d9423 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -0,0 +1,677 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef I915_GEM_REQUEST_H +#define I915_GEM_REQUEST_H + +#include <linux/fence.h> + +#include "i915_gem.h" + +struct intel_wait { + struct rb_node node; + struct task_struct *tsk; + u32 seqno; +}; + +struct intel_signal_node { + struct rb_node node; + struct intel_wait wait; +}; + +/** + * Request queue structure. + * + * The request queue allows us to note sequence numbers that have been emitted + * and may be associated with active buffers to be retired. + * + * By keeping this list, we can avoid having to do questionable sequence + * number comparisons on buffer last_read|write_seqno. It also allows an + * emission time to be associated with the request for tracking how far ahead + * of the GPU the submission is. + * + * When modifying this structure be very aware that we perform a lockless + * RCU lookup of it that may race against reallocation of the struct + * from the slab freelist. We intentionally do not zero the structure on + * allocation so that the lookup can use the dangling pointers (and is + * cogniscent that those pointers may be wrong). 
Instead, everything that + * needs to be initialised must be done so explicitly. + * + * The requests are reference counted. + */ +struct drm_i915_gem_request { + struct fence fence; + spinlock_t lock; + + /** On Which ring this request was generated */ + struct drm_i915_private *i915; + + /** + * Context and ring buffer related to this request + * Contexts are refcounted, so when this request is associated with a + * context, we must increment the context's refcount, to guarantee that + * it persists while any request is linked to it. Requests themselves + * are also refcounted, so the request will only be freed when the last + * reference to it is dismissed, and the code in + * i915_gem_request_free() will then decrement the refcount on the + * context. + */ + struct i915_gem_context *ctx; + struct intel_engine_cs *engine; + struct intel_ring *ring; + struct intel_signal_node signaling; + + /** GEM sequence number associated with the previous request, + * when the HWS breadcrumb is equal to this the GPU is processing + * this request. + */ + u32 previous_seqno; + + /** Position in the ringbuffer of the start of the request */ + u32 head; + + /** + * Position in the ringbuffer of the start of the postfix. + * This is required to calculate the maximum available ringbuffer + * space without overwriting the postfix. + */ + u32 postfix; + + /** Position in the ringbuffer of the end of the whole request */ + u32 tail; + + /** Preallocate space in the ringbuffer for the emitting the request */ + u32 reserved_space; + + /** + * Context related to the previous request. + * As the contexts are accessed by the hardware until the switch is + * completed to a new context, the hardware may still be writing + * to the context object after the breadcrumb is visible. We must + * not unpin/unbind/prune that object whilst still active and so + * we keep the previous context pinned until the following (this) + * request is retired. + */ + struct i915_gem_context *previous_context; + + /** Batch buffer related to this request if any (used for + * error state dump only). + */ + struct i915_vma *batch; + struct list_head active_list; + + /** Time at which this request was emitted, in jiffies. */ + unsigned long emitted_jiffies; + + /** engine->request_list entry for this request */ + struct list_head link; + + /** ring->request_list entry for this request */ + struct list_head ring_link; + + struct drm_i915_file_private *file_priv; + /** file_priv list entry for this request */ + struct list_head client_list; + + /** + * The ELSP only accepts two elements at a time, so we queue + * context/tail pairs on a given queue (ring->execlist_queue) until the + * hardware is available. The queue serves a double purpose: we also use + * it to keep track of the up to 2 contexts currently in the hardware + * (usually one in execution and the other queued up by the GPU): We + * only remove elements from the head of the queue when the hardware + * informs us that an element has been completed. + * + * All accesses to the queue are mediated by a spinlock + * (ring->execlist_lock). + */ + + /** Execlist link in the submission queue.*/ + struct list_head execlist_link; + + /** Execlists no. of times this request has been sent to the ELSP */ + int elsp_submitted; + + /** Execlists context hardware id. 
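The head, postfix and tail offsets documented above exist so the driver can compute how much ring space remains without overwriting a request's postfix. A minimal sketch of the usual free-space arithmetic for a power-of-two ring follows; the ring size is arbitrary and the driver's reserved_space handling is left out.

    #include <stdio.h>
    #include <assert.h>

    #define RING_SIZE 4096u             /* must be a power of two for the mask trick */

    /* Bytes that can be written after 'tail' without catching up to 'head'. */
    static unsigned int ring_space(unsigned int head, unsigned int tail)
    {
        /* Subtract 1 so a completely full ring never looks identical to an
         * empty one (head == tail would otherwise be ambiguous). */
        return (head - tail - 1) & (RING_SIZE - 1);
    }

    int main(void)
    {
        assert(ring_space(0, 0) == RING_SIZE - 1);          /* empty ring */
        assert(ring_space(256, 512) == RING_SIZE - 256 - 1);
        printf("space with head=128 tail=3968: %u bytes\n", ring_space(128, 3968));
        return 0;
    }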
*/ + unsigned int ctx_hw_id; +}; + +extern const struct fence_ops i915_fence_ops; + +static inline bool fence_is_i915(struct fence *fence) +{ + return fence->ops == &i915_fence_ops; +} + +struct drm_i915_gem_request * __must_check +i915_gem_request_alloc(struct intel_engine_cs *engine, + struct i915_gem_context *ctx); +int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, + struct drm_file *file); +void i915_gem_request_retire_upto(struct drm_i915_gem_request *req); + +static inline u32 +i915_gem_request_get_seqno(struct drm_i915_gem_request *req) +{ + return req ? req->fence.seqno : 0; +} + +static inline struct intel_engine_cs * +i915_gem_request_get_engine(struct drm_i915_gem_request *req) +{ + return req ? req->engine : NULL; +} + +static inline struct drm_i915_gem_request * +to_request(struct fence *fence) +{ + /* We assume that NULL fence/request are interoperable */ + BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); + GEM_BUG_ON(fence && !fence_is_i915(fence)); + return container_of(fence, struct drm_i915_gem_request, fence); +} + +static inline struct drm_i915_gem_request * +i915_gem_request_get(struct drm_i915_gem_request *req) +{ + return to_request(fence_get(&req->fence)); +} + +static inline struct drm_i915_gem_request * +i915_gem_request_get_rcu(struct drm_i915_gem_request *req) +{ + return to_request(fence_get_rcu(&req->fence)); +} + +static inline void +i915_gem_request_put(struct drm_i915_gem_request *req) +{ + fence_put(&req->fence); +} + +static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, + struct drm_i915_gem_request *src) +{ + if (src) + i915_gem_request_get(src); + + if (*pdst) + i915_gem_request_put(*pdst); + + *pdst = src; +} + +void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); +#define i915_add_request(req) \ + __i915_add_request(req, true) +#define i915_add_request_no_flush(req) \ + __i915_add_request(req, false) + +struct intel_rps_client; +#define NO_WAITBOOST ERR_PTR(-1) +#define IS_RPS_CLIENT(p) (!IS_ERR(p)) +#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p)) + +int i915_wait_request(struct drm_i915_gem_request *req, + bool interruptible, + s64 *timeout, + struct intel_rps_client *rps) + __attribute__((nonnull(1))); + +static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); + +/** + * Returns true if seq1 is later than seq2. + */ +static inline bool i915_seqno_passed(u32 seq1, u32 seq2) +{ + return (s32)(seq1 - seq2) >= 0; +} + +static inline bool +i915_gem_request_started(const struct drm_i915_gem_request *req) +{ + return i915_seqno_passed(intel_engine_get_seqno(req->engine), + req->previous_seqno); +} + +static inline bool +i915_gem_request_completed(const struct drm_i915_gem_request *req) +{ + return i915_seqno_passed(intel_engine_get_seqno(req->engine), + req->fence.seqno); +} + +bool __i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us); +static inline bool i915_spin_request(const struct drm_i915_gem_request *request, + int state, unsigned long timeout_us) +{ + return (i915_gem_request_started(request) && + __i915_spin_request(request, state, timeout_us)); +} + +/* We treat requests as fences. This is not be to confused with our + * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. + * We use the fences to synchronize access from the CPU with activity on the + * GPU, for example, we should not rewrite an object's PTE whilst the GPU + * is reading them. 
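i915_seqno_passed() above performs the comparison in signed 32-bit arithmetic so that it keeps giving the right answer when the seqno counter wraps. A short self-contained demonstration of the same expression:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if seq1 is at (or ahead of) seq2, tolerating 32-bit wraparound. */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        assert(seqno_passed(10, 10));
        assert(seqno_passed(11, 10));
        assert(!seqno_passed(10, 11));

        /* Across the wrap: 0x00000002 was emitted after 0xfffffffe. */
        assert(seqno_passed(0x00000002u, 0xfffffffeu));
        assert(!seqno_passed(0xfffffffeu, 0x00000002u));

        puts("seqno comparison behaves across wraparound");
        return 0;
    }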
We also track fences at a higher level to provide + * implicit synchronisation around GEM objects, e.g. set-domain will wait + * for outstanding GPU rendering before marking the object ready for CPU + * access, or a pageflip will wait until the GPU is complete before showing + * the frame on the scanout. + * + * In order to use a fence, the object must track the fence it needs to + * serialise with. For example, GEM objects want to track both read and + * write access so that we can perform concurrent read operations between + * the CPU and GPU engines, as well as waiting for all rendering to + * complete, or waiting for the last GPU user of a "fence register". The + * object then embeds a #i915_gem_active to track the most recent (in + * retirement order) request relevant for the desired mode of access. + * The #i915_gem_active is updated with i915_gem_active_set() to track the + * most recent fence request, typically this is done as part of + * i915_vma_move_to_active(). + * + * When the #i915_gem_active completes (is retired), it will + * signal its completion to the owner through a callback as well as mark + * itself as idle (i915_gem_active.request == NULL). The owner + * can then perform any action, such as delayed freeing of an active + * resource including itself. + */ +struct i915_gem_active; + +typedef void (*i915_gem_retire_fn)(struct i915_gem_active *, + struct drm_i915_gem_request *); + +struct i915_gem_active { + struct drm_i915_gem_request __rcu *request; + struct list_head link; + i915_gem_retire_fn retire; +}; + +void i915_gem_retire_noop(struct i915_gem_active *, + struct drm_i915_gem_request *request); + +/** + * init_request_active - prepares the activity tracker for use + * @active - the active tracker + * @func - a callback when then the tracker is retired (becomes idle), + * can be NULL + * + * init_request_active() prepares the embedded @active struct for use as + * an activity tracker, that is for tracking the last known active request + * associated with it. When the last request becomes idle, when it is retired + * after completion, the optional callback @func is invoked. + */ +static inline void +init_request_active(struct i915_gem_active *active, + i915_gem_retire_fn retire) +{ + INIT_LIST_HEAD(&active->link); + active->retire = retire ?: i915_gem_retire_noop; +} + +/** + * i915_gem_active_set - updates the tracker to watch the current request + * @active - the active tracker + * @request - the request to watch + * + * i915_gem_active_set() watches the given @request for completion. Whilst + * that @request is busy, the @active reports busy. When that @request is + * retired, the @active tracker is updated to report idle. + */ +static inline void +i915_gem_active_set(struct i915_gem_active *active, + struct drm_i915_gem_request *request) +{ + list_move(&active->link, &request->active_list); + rcu_assign_pointer(active->request, request); +} + +static inline struct drm_i915_gem_request * +__i915_gem_active_peek(const struct i915_gem_active *active) +{ + /* Inside the error capture (running with the driver in an unknown + * state), we want to bend the rules slightly (a lot). + * + * Work is in progress to make it safer, in the meantime this keeps + * the known issue from spamming the logs. + */ + return rcu_dereference_protected(active->request, 1); +} + +/** + * i915_gem_active_raw - return the active request + * @active - the active tracker + * + * i915_gem_active_raw() returns the current request being tracked, or NULL. 
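init_request_active() above installs a no-op retirement callback when none is supplied, so callers can always invoke active->retire() without a NULL check. A small sketch of that default-to-noop pattern with invented tracker and request types, not the driver's structures:

    #include <stdio.h>
    #include <stddef.h>

    struct tracker;
    typedef void (*retire_fn)(struct tracker *, int request_id);

    struct tracker {
        int request_id;                 /* 0 means idle */
        retire_fn retire;
    };

    static void retire_noop(struct tracker *t, int request_id)
    {
        (void)t;
        (void)request_id;
    }

    static void tracker_init(struct tracker *t, retire_fn retire)
    {
        t->request_id = 0;
        t->retire = retire ? retire : retire_noop;  /* never NULL */
    }

    static void tracker_retire(struct tracker *t)
    {
        int id = t->request_id;

        t->request_id = 0;
        t->retire(t, id);               /* safe to call unconditionally */
    }

    static void report(struct tracker *t, int request_id)
    {
        (void)t;
        printf("request %d retired\n", request_id);
    }

    int main(void)
    {
        struct tracker a, b;

        tracker_init(&a, report);
        tracker_init(&b, NULL);

        a.request_id = 42;
        tracker_retire(&a);             /* prints */
        tracker_retire(&b);             /* silently does nothing */
        return 0;
    }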
+ * It does not obtain a reference on the request for the caller, so the caller + * must hold struct_mutex. + */ +static inline struct drm_i915_gem_request * +i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex) +{ + return rcu_dereference_protected(active->request, + lockdep_is_held(mutex)); +} + +/** + * i915_gem_active_peek - report the active request being monitored + * @active - the active tracker + * + * i915_gem_active_peek() returns the current request being tracked if + * still active, or NULL. It does not obtain a reference on the request + * for the caller, so the caller must hold struct_mutex. + */ +static inline struct drm_i915_gem_request * +i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) +{ + struct drm_i915_gem_request *request; + + request = i915_gem_active_raw(active, mutex); + if (!request || i915_gem_request_completed(request)) + return NULL; + + return request; +} + +/** + * i915_gem_active_get - return a reference to the active request + * @active - the active tracker + * + * i915_gem_active_get() returns a reference to the active request, or NULL + * if the active tracker is idle. The caller must hold struct_mutex. + */ +static inline struct drm_i915_gem_request * +i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex) +{ + return i915_gem_request_get(i915_gem_active_peek(active, mutex)); +} + +/** + * __i915_gem_active_get_rcu - return a reference to the active request + * @active - the active tracker + * + * __i915_gem_active_get() returns a reference to the active request, or NULL + * if the active tracker is idle. The caller must hold the RCU read lock, but + * the returned pointer is safe to use outside of RCU. + */ +static inline struct drm_i915_gem_request * +__i915_gem_active_get_rcu(const struct i915_gem_active *active) +{ + /* Performing a lockless retrieval of the active request is super + * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing + * slab of request objects will not be freed whilst we hold the + * RCU read lock. It does not guarantee that the request itself + * will not be freed and then *reused*. Viz, + * + * Thread A Thread B + * + * req = active.request + * retire(req) -> free(req); + * (req is now first on the slab freelist) + * active.request = NULL + * + * req = new submission on a new object + * ref(req) + * + * To prevent the request from being reused whilst the caller + * uses it, we take a reference like normal. Whilst acquiring + * the reference we check that it is not in a destroyed state + * (refcnt == 0). That prevents the request being reallocated + * whilst the caller holds on to it. To check that the request + * was not reallocated as we acquired the reference we have to + * check that our request remains the active request across + * the lookup, in the same manner as a seqlock. The visibility + * of the pointer versus the reference counting is controlled + * by using RCU barriers (rcu_dereference and rcu_assign_pointer). + * + * In the middle of all that, we inspect whether the request is + * complete. Retiring is lazy so the request may be completed long + * before the active tracker is updated. Querying whether the + * request is complete is far cheaper (as it involves no locked + * instructions setting cachelines to exclusive) than acquiring + * the reference, so we do it first. The RCU read lock ensures the + * pointer dereference is valid, but does not ensure that the + * seqno nor HWS is the right one! 
However, if the request was + * reallocated, that means the active tracker's request was complete. + * If the new request is also complete, then both are and we can + * just report the active tracker is idle. If the new request is + * incomplete, then we acquire a reference on it and check that + * it remained the active request. + * + * It is then imperative that we do not zero the request on + * reallocation, so that we can chase the dangling pointers! + * See i915_gem_request_alloc(). + */ + do { + struct drm_i915_gem_request *request; + + request = rcu_dereference(active->request); + if (!request || i915_gem_request_completed(request)) + return NULL; + + request = i915_gem_request_get_rcu(request); + + /* What stops the following rcu_access_pointer() from occurring + * before the above i915_gem_request_get_rcu()? If we were + * to read the value before pausing to get the reference to + * the request, we may not notice a change in the active + * tracker. + * + * The rcu_access_pointer() is a mere compiler barrier, which + * means both the CPU and compiler are free to perform the + * memory read without constraint. The compiler only has to + * ensure that any operations after the rcu_access_pointer() + * occur afterwards in program order. This means the read may + * be performed earlier by an out-of-order CPU, or adventurous + * compiler. + * + * The atomic operation at the heart of + * i915_gem_request_get_rcu(), see fence_get_rcu(), is + * atomic_inc_not_zero() which is only a full memory barrier + * when successful. That is, if i915_gem_request_get_rcu() + * returns the request (and so with the reference counted + * incremented) then the following read for rcu_access_pointer() + * must occur after the atomic operation and so confirm + * that this request is the one currently being tracked. + * + * The corresponding write barrier is part of + * rcu_assign_pointer(). + */ + if (!request || request == rcu_access_pointer(active->request)) + return rcu_pointer_handoff(request); + + i915_gem_request_put(request); + } while (1); +} + +/** + * i915_gem_active_get_unlocked - return a reference to the active request + * @active - the active tracker + * + * i915_gem_active_get_unlocked() returns a reference to the active request, + * or NULL if the active tracker is idle. The reference is obtained under RCU, + * so no locking is required by the caller. + * + * The reference should be freed with i915_gem_request_put(). + */ +static inline struct drm_i915_gem_request * +i915_gem_active_get_unlocked(const struct i915_gem_active *active) +{ + struct drm_i915_gem_request *request; + + rcu_read_lock(); + request = __i915_gem_active_get_rcu(active); + rcu_read_unlock(); + + return request; +} + +/** + * i915_gem_active_isset - report whether the active tracker is assigned + * @active - the active tracker + * + * i915_gem_active_isset() returns true if the active tracker is currently + * assigned to a request. Due to the lazy retiring, that request may be idle + * and this may report stale information. + */ +static inline bool +i915_gem_active_isset(const struct i915_gem_active *active) +{ + return rcu_access_pointer(active->request); +} + +/** + * i915_gem_active_is_idle - report whether the active tracker is idle + * @active - the active tracker + * + * i915_gem_active_is_idle() returns true if the active tracker is currently + * unassigned or if the request is complete (but not yet retired). Requires + * the caller to hold struct_mutex (but that can be relaxed if desired). 
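The lockless lookup described above acquires a reference only while the refcount is still non-zero and then re-reads the tracker to confirm the object was not recycled in between, seqlock-style. The sketch below compresses that ordering into userspace C11 atomics; the object type, the kref_get_unless_zero()-like helper and the single-threaded main() are illustrations only, and everything uses the default seq_cst ordering rather than the kernel's finer-grained barriers.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int refcount;            /* 0 means the object may be recycled */
    };

    static _Atomic(struct obj *) active;    /* the "tracker" slot */

    static bool get_unless_zero(struct obj *o)
    {
        int ref = atomic_load(&o->refcount);

        while (ref != 0) {
            if (atomic_compare_exchange_weak(&o->refcount, &ref, ref + 1))
                return true;            /* reference acquired */
        }
        return false;                   /* already dead, caller must look again */
    }

    static void put(struct obj *o)
    {
        atomic_fetch_sub(&o->refcount, 1);
    }

    static struct obj *get_active(void)
    {
        for (;;) {
            struct obj *o = atomic_load(&active);

            if (!o)
                return NULL;
            if (!get_unless_zero(o))
                continue;               /* raced with the final put, retry */

            /* Confirm the slot still points at the object we pinned;
             * otherwise it was recycled under us and we must retry. */
            if (o == atomic_load(&active))
                return o;

            put(o);
        }
    }

    int main(void)
    {
        struct obj o;
        struct obj *ref;

        atomic_init(&o.refcount, 1);
        atomic_store(&active, &o);

        ref = get_active();
        printf("got reference: %s\n", ref ? "yes" : "no");
        if (ref)
            put(ref);
        return 0;
    }

The re-check after taking the reference is what makes the pattern safe: once the refcount is pinned the object cannot be recycled, so if the slot still names it, it really was the tracked object and not a reused allocation.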
+ */ +static inline bool +i915_gem_active_is_idle(const struct i915_gem_active *active, + struct mutex *mutex) +{ + return !i915_gem_active_peek(active, mutex); +} + +/** + * i915_gem_active_wait - waits until the request is completed + * @active - the active request on which to wait + * + * i915_gem_active_wait() waits until the request is completed before + * returning. Note that it does not guarantee that the request is + * retired first, see i915_gem_active_retire(). + * + * i915_gem_active_wait() returns immediately if the active + * request is already complete. + */ +static inline int __must_check +i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex) +{ + struct drm_i915_gem_request *request; + + request = i915_gem_active_peek(active, mutex); + if (!request) + return 0; + + return i915_wait_request(request, true, NULL, NULL); +} + +/** + * i915_gem_active_wait_unlocked - waits until the request is completed + * @active - the active request on which to wait + * @interruptible - whether the wait can be woken by a userspace signal + * @timeout - how long to wait at most + * @rps - userspace client to charge for a waitboost + * + * i915_gem_active_wait_unlocked() waits until the request is completed before + * returning, without requiring any locks to be held. Note that it does not + * retire any requests before returning. + * + * This function relies on RCU in order to acquire the reference to the active + * request without holding any locks. See __i915_gem_active_get_rcu() for the + * glory details on how that is managed. Once the reference is acquired, we + * can then wait upon the request, and afterwards release our reference, + * free of any locking. + * + * This function wraps i915_wait_request(), see it for the full details on + * the arguments. + * + * Returns 0 if successful, or a negative error code. + */ +static inline int +i915_gem_active_wait_unlocked(const struct i915_gem_active *active, + bool interruptible, + s64 *timeout, + struct intel_rps_client *rps) +{ + struct drm_i915_gem_request *request; + int ret = 0; + + request = i915_gem_active_get_unlocked(active); + if (request) { + ret = i915_wait_request(request, interruptible, timeout, rps); + i915_gem_request_put(request); + } + + return ret; +} + +/** + * i915_gem_active_retire - waits until the request is retired + * @active - the active request on which to wait + * + * i915_gem_active_retire() waits until the request is completed, + * and then ensures that at least the retirement handler for this + * @active tracker is called before returning. If the @active + * tracker is idle, the function returns immediately. + */ +static inline int __must_check +i915_gem_active_retire(struct i915_gem_active *active, + struct mutex *mutex) +{ + struct drm_i915_gem_request *request; + int ret; + + request = i915_gem_active_raw(active, mutex); + if (!request) + return 0; + + ret = i915_wait_request(request, true, NULL, NULL); + if (ret) + return ret; + + list_del_init(&active->link); + RCU_INIT_POINTER(active->request, NULL); + + active->retire(active, request); + + return 0; +} + +/* Convenience functions for peeking at state inside active's request whilst + * guarded by the struct_mutex. 
+ */ + +static inline uint32_t +i915_gem_active_get_seqno(const struct i915_gem_active *active, + struct mutex *mutex) +{ + return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex)); +} + +static inline struct intel_engine_cs * +i915_gem_active_get_engine(const struct i915_gem_active *active, + struct mutex *mutex) +{ + return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex)); +} + +#define for_each_active(mask, idx) \ + for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx)) + +#endif /* I915_GEM_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 6f10b421487b..b80802b35353 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) #endif } -static int num_vma_bound(struct drm_i915_gem_object *obj) +static bool any_vma_pinned(struct drm_i915_gem_object *obj) { struct i915_vma *vma; - int count = 0; - list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (drm_mm_node_allocated(&vma->node)) - count++; - if (vma->pin_count) - count++; - } + list_for_each_entry(vma, &obj->vma_list, obj_link) + if (i915_vma_is_pinned(vma)) + return true; - return count; + return false; } static bool swap_available(void) @@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) * to the GPU, simply unbinding from the GPU is not going to succeed * in releasing our pin count on the pages themselves. */ - if (obj->pages_pin_count != num_vma_bound(obj)) + if (obj->pages_pin_count > obj->bind_count) + return false; + + if (any_vma_pinned(obj)) return false; /* We can only return physical pages to the system if we can either @@ -163,17 +162,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, */ for (phase = phases; phase->list; phase++) { struct list_head still_in_list; + struct drm_i915_gem_object *obj; if ((flags & phase->bit) == 0) continue; INIT_LIST_HEAD(&still_in_list); - while (count < target && !list_empty(phase->list)) { - struct drm_i915_gem_object *obj; - struct i915_vma *vma, *v; - - obj = list_first_entry(phase->list, - typeof(*obj), global_list); + while (count < target && + (obj = list_first_entry_or_null(phase->list, + typeof(*obj), + global_list))) { list_move_tail(&obj->global_list, &still_in_list); if (flags & I915_SHRINK_PURGEABLE && @@ -184,24 +182,21 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, !is_vmalloc_addr(obj->mapping)) continue; - if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) + if ((flags & I915_SHRINK_ACTIVE) == 0 && + i915_gem_object_is_active(obj)) continue; if (!can_release_pages(obj)) continue; - drm_gem_object_reference(&obj->base); + i915_gem_object_get(obj); /* For the unbound phase, this should be a no-op! 
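The for_each_active() macro above walks the set bits of an engine mask with ffs(). The same bit-peeling idiom as a standalone program, using POSIX ffs() and made-up engine names:

    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    int main(void)
    {
        static const char *names[] = { "rcs", "vcs", "bcs", "vecs" };
        unsigned int mask = 0x5;        /* bits 0 and 2 set: "rcs" and "bcs" */
        int idx;

        /* Same shape as for_each_active(): peel off the lowest set bit
         * each iteration until the mask is exhausted. */
        while (mask) {
            idx = ffs(mask) - 1;
            printf("active engine %d (%s)\n", idx, names[idx]);
            mask &= ~(1u << idx);
        }
        return 0;
    }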
*/ - list_for_each_entry_safe(vma, v, - &obj->vma_list, obj_link) - if (i915_vma_unbind(vma)) - break; - + i915_gem_object_unbind(obj); if (i915_gem_object_put_pages(obj) == 0) count += obj->base.size >> PAGE_SHIFT; - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); } list_splice(&still_in_list, phase->list); } @@ -210,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, intel_runtime_pm_put(dev_priv); i915_gem_retire_requests(dev_priv); + /* expedite the RCU grace period to free some request slabs */ + synchronize_rcu_expedited(); return count; } @@ -230,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, */ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) { - return i915_gem_shrink(dev_priv, -1UL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); + unsigned long freed; + + freed = i915_gem_shrink(dev_priv, -1UL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); + rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ + + return freed; } static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) @@ -242,9 +244,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) if (!mutex_is_locked_by(&dev->struct_mutex, current)) return false; - if (to_i915(dev)->mm.shrinker_no_lock_stealing) - return false; - *unlock = false; } else *unlock = true; @@ -273,7 +272,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!obj->active && can_release_pages(obj)) + if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) count += obj->base.size >> PAGE_SHIFT; } @@ -321,17 +320,22 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, struct shrinker_lock_uninterruptible *slu, int timeout_ms) { - unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; + unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); + + do { + if (i915_gem_wait_for_idle(dev_priv, false) == 0 && + i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) + break; - while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) { schedule_timeout_killable(1); if (fatal_signal_pending(current)) return false; - if (--timeout == 0) { + + if (time_after(jiffies, timeout)) { pr_err("Unable to lock GPU to purge memory.\n"); return false; } - } + } while (1); slu->was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; @@ -410,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gem_wait_for_idle(dev_priv); + ret = i915_gem_wait_for_idle(dev_priv, false); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 66be299a1486..aa050fa1e558 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -115,17 +115,28 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) base = bsm & INTEL_BSM_MASK; } else if (IS_I865G(dev)) { + u32 tseg_size = 0; u16 toud = 0; + u8 tmp; + + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), + I845_ESMRAMC, &tmp); + + if (tmp & TSEG_ENABLE) { + switch (tmp & I845_TSEG_SIZE_MASK) { + case I845_TSEG_SIZE_512K: + tseg_size = KB(512); + break; + case I845_TSEG_SIZE_1M: + tseg_size = MB(1); + break; + } + } - /* - * FIXME 
is the graphics stolen memory region - * always at TOUD? Ie. is it always the last - * one to be allocated by the BIOS? - */ pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0), I865_TOUD, &toud); - base = toud << 16; + base = (toud << 16) + tseg_size; } else if (IS_I85X(dev)) { u32 tseg_size = 0; u32 tom; @@ -685,7 +696,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; - vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base); + vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -698,24 +709,25 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, */ vma->node.start = gtt_offset; vma->node.size = size; - if (drm_mm_initialized(&ggtt->base.mm)) { - ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); - if (ret) { - DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - goto err; - } - vma->bound |= GLOBAL_BIND; - __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->vm_link, &ggtt->base.inactive_list); + ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); + if (ret) { + DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); + goto err; } + vma->pages = obj->pages; + vma->flags |= I915_VMA_GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); + list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); + obj->bind_count++; + list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); i915_gem_object_pin_pages(obj); return obj; err: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); return NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 8030199731db..a14b1e3d4c78 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -68,6 +68,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) if (tiling_mode == I915_TILING_NONE) return true; + if (tiling_mode > I915_TILING_LAST) + return false; + if (IS_GEN2(dev) || (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_width = 128; @@ -113,36 +116,58 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } -/* Is the current GTT allocation valid for the change in tiling? */ -static bool -i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) +static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode) { + struct drm_i915_private *dev_priv = to_i915(vma->vm->dev); u32 size; - if (tiling_mode == I915_TILING_NONE) - return true; - - if (INTEL_INFO(obj->base.dev)->gen >= 4) + if (!i915_vma_is_map_and_fenceable(vma)) return true; - if (IS_GEN3(obj->base.dev)) { - if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) + if (INTEL_GEN(dev_priv) == 3) { + if (vma->node.start & ~I915_FENCE_START_MASK) return false; } else { - if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) + if (vma->node.start & ~I830_FENCE_START_MASK) return false; } - size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); - if (i915_gem_obj_ggtt_size(obj) != size) + size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode); + if (vma->node.size < size) return false; - if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) + if (vma->node.start & (size - 1)) return false; return true; } +/* Make the current GTT allocation valid for the change in tiling. 
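i915_vma_fence_prepare() above reduces to three power-of-two checks on the GGTT node: the start honours the fence start mask, the node is at least the fence size, and the start is aligned to that size. A generic sketch of those checks follows; the start mask is an invented placeholder, since the real masks are generation-specific.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_FENCE_START_MASK 0x0ff00000u   /* placeholder, not a HW value */

    static bool fence_placement_ok(uint64_t start, uint64_t node_size,
                                   uint64_t fence_size)
    {
        if (start & ~(uint64_t)FAKE_FENCE_START_MASK)
            return false;               /* start outside the addressable range */
        if (node_size < fence_size)
            return false;               /* node too small to back the fence */
        if (start & (fence_size - 1))
            return false;               /* fence_size is a power of two, so the
                                         * start must be size-aligned */
        return true;
    }

    int main(void)
    {
        /* 2 MiB fence at a 2 MiB-aligned offset: acceptable. */
        printf("%d\n", fence_placement_ok(0x00200000, 2u << 20, 2u << 20));
        /* Same fence at a 3 MiB offset: rejected, not 2 MiB-aligned. */
        printf("%d\n", fence_placement_ok(0x00300000, 2u << 20, 2u << 20));
        return 0;
    }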
*/ +static int +i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_vma *vma; + int ret; + + if (tiling_mode == I915_TILING_NONE) + return 0; + + if (INTEL_GEN(dev_priv) >= 4) + return 0; + + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (i915_vma_fence_prepare(vma, tiling_mode)) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + } + + return 0; +} + /** * i915_gem_set_tiling - IOCTL handler to set tiling mode * @dev: DRM device @@ -164,15 +189,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_i915_gem_set_tiling *args = data; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - int ret = 0; + int err = 0; + + /* Make sure we don't cross-contaminate obj->tiling_and_stride */ + BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK); - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) return -ENOENT; if (!i915_tiling_ok(dev, args->stride, obj->base.size, args->tiling_mode)) { - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); return -EINVAL; } @@ -180,7 +208,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); if (obj->pin_display || obj->framebuffer_references) { - ret = -EBUSY; + err = -EBUSY; goto err; } @@ -213,8 +241,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } } - if (args->tiling_mode != obj->tiling_mode || - args->stride != obj->stride) { + if (args->tiling_mode != i915_gem_object_get_tiling(obj) || + args->stride != i915_gem_object_get_stride(obj)) { /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. Otherwise we can just leave it alone, but @@ -227,34 +255,36 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * has to also include the unfenced register the GPU uses * whilst executing a fenced command for an untiled object. */ - if (obj->map_and_fenceable && - !i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); - if (ret == 0) { + err = i915_gem_object_fence_prepare(obj, args->tiling_mode); + if (!err) { + struct i915_vma *vma; + if (obj->pages && obj->madv == I915_MADV_WILLNEED && dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { if (args->tiling_mode == I915_TILING_NONE) i915_gem_object_unpin_pages(obj); - if (obj->tiling_mode == I915_TILING_NONE) + if (!i915_gem_object_is_tiled(obj)) i915_gem_object_pin_pages(obj); } - obj->fence_dirty = - obj->last_fenced_req || - obj->fence_reg != I915_FENCE_REG_NONE; + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!vma->fence) + continue; - obj->tiling_mode = args->tiling_mode; - obj->stride = args->stride; + vma->fence->dirty = true; + } + obj->tiling_and_stride = + args->stride | args->tiling_mode; /* Force the fence to be reacquired for GTT access */ i915_gem_release_mmap(obj); } } /* we have to maintain this existing ABI... 
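The set-tiling path above packs the tiling mode and stride into a single obj->tiling_and_stride word, with a BUILD_BUG_ON guaranteeing that the two bit ranges cannot collide. A userspace sketch of that packing, with invented masks (the real layout lives in the driver headers) and C11 static_assert playing the role of BUILD_BUG_ON:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: low 2 bits hold the tiling mode, the rest hold the
     * stride, which is assumed to be at least 4-byte aligned. */
    #define TILING_MASK  0x3u
    #define STRIDE_MASK  (~TILING_MASK)

    #define TILING_NONE  0u
    #define TILING_X     1u
    #define TILING_Y     2u
    #define TILING_LAST  TILING_Y

    /* Compile-time proof that a tiling value can never leak into the stride bits. */
    static_assert((TILING_LAST & STRIDE_MASK) == 0, "tiling bits overlap stride");

    static uint32_t pack(uint32_t stride, uint32_t tiling)
    {
        assert((stride & TILING_MASK) == 0);    /* stride must be aligned */
        return stride | tiling;
    }

    int main(void)
    {
        uint32_t v = pack(4096, TILING_X);

        printf("tiling=%u stride=%u\n",
               (unsigned)(v & TILING_MASK),
               (unsigned)(v & STRIDE_MASK));
        return 0;
    }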
*/ - args->stride = obj->stride; - args->tiling_mode = obj->tiling_mode; + args->stride = i915_gem_object_get_stride(obj); + args->tiling_mode = i915_gem_object_get_tiling(obj); /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -268,12 +298,12 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } err: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); intel_runtime_pm_put(dev_priv); - return ret; + return err; } /** @@ -297,14 +327,12 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); - if (&obj->base == NULL) + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) return -ENOENT; - mutex_lock(&dev->struct_mutex); - - args->tiling_mode = obj->tiling_mode; - switch (obj->tiling_mode) { + args->tiling_mode = READ_ONCE(obj->tiling_and_stride) & TILING_MASK; + switch (args->tiling_mode) { case I915_TILING_X: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; break; @@ -328,8 +356,6 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - drm_gem_object_unreference(&obj->base); - mutex_unlock(&dev->struct_mutex); - + i915_gem_object_put_unlocked(obj); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 2314c88323e3..be54825ef3e8 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -63,33 +63,12 @@ struct i915_mmu_object { static void wait_rendering(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; - int i, n; - - if (!obj->active) - return; - - n = 0; - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct drm_i915_gem_request *req; - - req = obj->last_read_req[i]; - if (req == NULL) - continue; - - requests[n++] = i915_gem_request_reference(req); - } - - mutex_unlock(&dev->struct_mutex); - - for (i = 0; i < n; i++) - __i915_wait_request(requests[i], false, NULL, NULL); - - mutex_lock(&dev->struct_mutex); + unsigned long active = __I915_BO_ACTIVE(obj); + int idx; - for (i = 0; i < n; i++) - i915_gem_request_unreference(requests[i]); + for_each_active(active, idx) + i915_gem_active_wait_unlocked(&obj->last_read[idx], + false, NULL, NULL); } static void cancel_userptr(struct work_struct *work) @@ -98,28 +77,19 @@ static void cancel_userptr(struct work_struct *work) struct drm_i915_gem_object *obj = mo->obj; struct drm_device *dev = obj->base.dev; + wait_rendering(obj); + mutex_lock(&dev->struct_mutex); /* Cancel any active worker and force us to re-evaluate gup */ obj->userptr.work = NULL; if (obj->pages != NULL) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_vma *vma, *tmp; - bool was_interruptible; - - wait_rendering(obj); - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) - WARN_ON(i915_vma_unbind(vma)); + /* We are inside a kthread context and can't be interrupted */ + WARN_ON(i915_gem_object_unbind(obj)); WARN_ON(i915_gem_object_put_pages(obj)); - - dev_priv->mm.interruptible = was_interruptible; } - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); 
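The get-tiling ioctl above no longer takes struct_mutex; it snapshots obj->tiling_and_stride once with READ_ONCE() and decodes both fields from that single value. A userspace analogue using a relaxed C11 atomic load in place of READ_ONCE(), reusing the invented masks from the packing sketch earlier:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TILING_MASK 0x3u

    int main(void)
    {
        _Atomic uint32_t tiling_and_stride;
        uint32_t snap;

        atomic_init(&tiling_and_stride, 4096u | 1u);    /* stride 4096, X-tiled */

        /* One load, then decode: both fields come from the same value,
         * even if a concurrent writer updates the word in between. */
        snap = atomic_load_explicit(&tiling_and_stride, memory_order_relaxed);

        printf("tiling=%u stride=%u\n",
               (unsigned)(snap & TILING_MASK),
               (unsigned)(snap & ~TILING_MASK));
        return 0;
    }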
mutex_unlock(&dev->struct_mutex); } @@ -572,12 +542,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) } } obj->userptr.work = ERR_PTR(ret); - if (ret) - __i915_gem_userptr_set_active(obj, false); } obj->userptr.workers--; - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); release_pages(pvec, pinned, 0); @@ -622,8 +590,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, obj->userptr.work = &work->work; obj->userptr.workers++; - work->obj = obj; - drm_gem_object_reference(&obj->base); + work->obj = i915_gem_object_get(obj); work->task = current; get_task_struct(work->task); @@ -659,15 +626,14 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) * to the vma (discard or cloning) which should prevent the more * egregious cases from causing harm. */ - if (IS_ERR(obj->userptr.work)) { - /* active flag will have been dropped already by the worker */ - ret = PTR_ERR(obj->userptr.work); - obj->userptr.work = NULL; - return ret; - } - if (obj->userptr.work) + + if (obj->userptr.work) { /* active flag should still be held for the pending work */ - return -EAGAIN; + if (IS_ERR(obj->userptr.work)) + return PTR_ERR(obj->userptr.work); + else + return -EAGAIN; + } /* Let the mmu-notifier know that we have begun and need cancellation */ ret = __i915_gem_userptr_set_active(obj, true); @@ -846,7 +812,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9d73d2216adc..41ec7a183c73 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -30,9 +30,9 @@ #include <generated/utsrelease.h> #include "i915_drv.h" -static const char *ring_str(int ring) +static const char *engine_str(int engine) { - switch (ring) { + switch (engine) { case RCS: return "render"; case VCS: return "bsd"; case BCS: return "blt"; @@ -42,16 +42,6 @@ static const char *ring_str(int ring) } } -static const char *pin_flag(int pinned) -{ - if (pinned > 0) - return " P"; - else if (pinned < 0) - return " p"; - else - return ""; -} - static const char *tiling_flag(int tiling) { switch (tiling) { @@ -189,7 +179,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, { int i; - err_printf(m, " %s [%d]:\n", name, count); + err_printf(m, "%s [%d]:\n", name, count); while (count--) { err_printf(m, " %08x_%08x %8u %02x %02x [ ", @@ -202,13 +192,12 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, err_printf(m, "%02x ", err->rseqno[i]); err_printf(m, "] %02x", err->wseqno); - err_puts(m, pin_flag(err->pinned)); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); err_puts(m, err->userptr ? " userptr" : ""); - err_puts(m, err->ring != -1 ? " " : ""); - err_puts(m, ring_str(err->ring)); + err_puts(m, err->engine != -1 ? 
" " : ""); + err_puts(m, engine_str(err->engine)); err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); if (err->name) @@ -221,7 +210,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, } } -static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) +static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a) { switch (a) { case HANGCHECK_IDLE: @@ -239,70 +228,74 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) return "unknown"; } -static void i915_ring_error_state(struct drm_i915_error_state_buf *m, - struct drm_device *dev, - struct drm_i915_error_state *error, - int ring_idx) +static void error_print_engine(struct drm_i915_error_state_buf *m, + struct drm_i915_error_engine *ee) { - struct drm_i915_error_ring *ring = &error->ring[ring_idx]; - - if (!ring->valid) - return; - - err_printf(m, "%s command stream:\n", ring_str(ring_idx)); - err_printf(m, " START: 0x%08x\n", ring->start); - err_printf(m, " HEAD: 0x%08x\n", ring->head); - err_printf(m, " TAIL: 0x%08x\n", ring->tail); - err_printf(m, " CTL: 0x%08x\n", ring->ctl); - err_printf(m, " HWS: 0x%08x\n", ring->hws); - err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd); - err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); - err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); - err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); - if (INTEL_INFO(dev)->gen >= 4) { - err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr); - err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); - err_printf(m, " INSTPS: 0x%08x\n", ring->instps); + err_printf(m, "%s command stream:\n", engine_str(ee->engine_id)); + err_printf(m, " START: 0x%08x\n", ee->start); + err_printf(m, " HEAD: 0x%08x\n", ee->head); + err_printf(m, " TAIL: 0x%08x\n", ee->tail); + err_printf(m, " CTL: 0x%08x\n", ee->ctl); + err_printf(m, " MODE: 0x%08x\n", ee->mode); + err_printf(m, " HWS: 0x%08x\n", ee->hws); + err_printf(m, " ACTHD: 0x%08x %08x\n", + (u32)(ee->acthd>>32), (u32)ee->acthd); + err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); + err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); + err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone); + if (ee->batchbuffer) { + u64 start = ee->batchbuffer->gtt_offset; + u64 end = start + ee->batchbuffer->gtt_size; + + err_printf(m, " batch: [0x%08x_%08x, 0x%08x_%08x]\n", + upper_32_bits(start), lower_32_bits(start), + upper_32_bits(end), lower_32_bits(end)); } - err_printf(m, " INSTPM: 0x%08x\n", ring->instpm); - err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr), - lower_32_bits(ring->faddr)); - if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi); - err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg); + if (INTEL_GEN(m->i915) >= 4) { + err_printf(m, " BBADDR: 0x%08x_%08x\n", + (u32)(ee->bbaddr>>32), (u32)ee->bbaddr); + err_printf(m, " BB_STATE: 0x%08x\n", ee->bbstate); + err_printf(m, " INSTPS: 0x%08x\n", ee->instps); + } + err_printf(m, " INSTPM: 0x%08x\n", ee->instpm); + err_printf(m, " FADDR: 0x%08x %08x\n", upper_32_bits(ee->faddr), + lower_32_bits(ee->faddr)); + if (INTEL_GEN(m->i915) >= 6) { + err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); + err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", - ring->semaphore_mboxes[0], - ring->semaphore_seqno[0]); + ee->semaphore_mboxes[0], + ee->semaphore_seqno[0]); err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", - 
ring->semaphore_mboxes[1], - ring->semaphore_seqno[1]); - if (HAS_VEBOX(dev)) { + ee->semaphore_mboxes[1], + ee->semaphore_seqno[1]); + if (HAS_VEBOX(m->i915)) { err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", - ring->semaphore_mboxes[2], - ring->semaphore_seqno[2]); + ee->semaphore_mboxes[2], + ee->semaphore_seqno[2]); } } - if (USES_PPGTT(dev)) { - err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode); + if (USES_PPGTT(m->i915)) { + err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(m->i915) >= 8) { int i; for (i = 0; i < 4; i++) err_printf(m, " PDP%d: 0x%016llx\n", - i, ring->vm_info.pdp[i]); + i, ee->vm_info.pdp[i]); } else { err_printf(m, " PP_DIR_BASE: 0x%08x\n", - ring->vm_info.pp_dir_base); + ee->vm_info.pp_dir_base); } } - err_printf(m, " seqno: 0x%08x\n", ring->seqno); - err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno); - err_printf(m, " waiting: %s\n", yesno(ring->waiting)); - err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); - err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); + err_printf(m, " seqno: 0x%08x\n", ee->seqno); + err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno); + err_printf(m, " waiting: %s\n", yesno(ee->waiting)); + err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); + err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); err_printf(m, " hangcheck: %s [%d]\n", - hangcheck_action_to_str(ring->hangcheck_action), - ring->hangcheck_score); + hangcheck_action_to_str(ee->hangcheck_action), + ee->hangcheck_score); } void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) @@ -328,6 +321,16 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, } } +static void err_print_capabilities(struct drm_i915_error_state_buf *m, + const struct intel_device_info *info) +{ +#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x)) +#define SEP_SEMICOLON ; + DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON); +#undef PRINT_FLAG +#undef SEP_SEMICOLON +} + int i915_error_state_to_str(struct drm_i915_error_state_buf *m, const struct i915_error_state_file_priv *error_priv) { @@ -347,18 +350,19 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); err_printf(m, "Kernel: " UTS_RELEASE "\n"); + err_print_capabilities(m, &error->device_info); max_hangcheck_score = 0; - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - if (error->ring[i].hangcheck_score > max_hangcheck_score) - max_hangcheck_score = error->ring[i].hangcheck_score; + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + if (error->engine[i].hangcheck_score > max_hangcheck_score) + max_hangcheck_score = error->engine[i].hangcheck_score; } - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - if (error->ring[i].hangcheck_score == max_hangcheck_score && - error->ring[i].pid != -1) { + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + if (error->engine[i].hangcheck_score == max_hangcheck_score && + error->engine[i].pid != -1) { err_printf(m, "Active process (on ring %s): %s [%d]\n", - ring_str(i), - error->ring[i].comm, - error->ring[i].pid); + engine_str(i), + error->engine[i].comm, + error->engine[i].pid); } } err_printf(m, "Reset count: %u\n", error->reset_count); @@ -414,36 +418,55 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (IS_GEN7(dev)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - for (i = 0; i < ARRAY_SIZE(error->ring); i++) - 
i915_ring_error_state(m, dev, error, i); + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + if (error->engine[i].engine_id != -1) + error_print_engine(m, &error->engine[i]); + } + + for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) { + char buf[128]; + int len, first = 1; - for (i = 0; i < error->vm_count; i++) { - err_printf(m, "vm[%d]\n", i); + if (!error->active_vm[i]) + break; - print_error_buffers(m, "Active", + len = scnprintf(buf, sizeof(buf), "Active ("); + for (j = 0; j < ARRAY_SIZE(error->engine); j++) { + if (error->engine[j].vm != error->active_vm[i]) + continue; + + len += scnprintf(buf + len, sizeof(buf), "%s%s", + first ? "" : ", ", + dev_priv->engine[j].name); + first = 0; + } + scnprintf(buf + len, sizeof(buf), ")"); + print_error_buffers(m, buf, error->active_bo[i], error->active_bo_count[i]); - - print_error_buffers(m, "Pinned", - error->pinned_bo[i], - error->pinned_bo_count[i]); } - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - obj = error->ring[i].batchbuffer; + print_error_buffers(m, "Pinned (global)", + error->pinned_bo, + error->pinned_bo_count); + + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + struct drm_i915_error_engine *ee = &error->engine[i]; + + obj = ee->batchbuffer; if (obj) { err_puts(m, dev_priv->engine[i].name); - if (error->ring[i].pid != -1) + if (ee->pid != -1) err_printf(m, " (submitted by %s [%d])", - error->ring[i].comm, - error->ring[i].pid); + ee->comm, + ee->pid); err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); print_error_obj(m, obj); } - obj = error->ring[i].wa_batchbuffer; + obj = ee->wa_batchbuffer; if (obj) { err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", dev_priv->engine[i].name, @@ -451,38 +474,40 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, print_error_obj(m, obj); } - if (error->ring[i].num_requests) { + if (ee->num_requests) { err_printf(m, "%s --- %d requests\n", dev_priv->engine[i].name, - error->ring[i].num_requests); - for (j = 0; j < error->ring[i].num_requests; j++) { - err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", - error->ring[i].requests[j].seqno, - error->ring[i].requests[j].jiffies, - error->ring[i].requests[j].tail); + ee->num_requests); + for (j = 0; j < ee->num_requests; j++) { + err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x, tail 0x%08x\n", + ee->requests[j].pid, + ee->requests[j].seqno, + ee->requests[j].jiffies, + ee->requests[j].head, + ee->requests[j].tail); } } - if (error->ring[i].num_waiters) { + if (ee->num_waiters) { err_printf(m, "%s --- %d waiters\n", dev_priv->engine[i].name, - error->ring[i].num_waiters); - for (j = 0; j < error->ring[i].num_waiters; j++) { + ee->num_waiters); + for (j = 0; j < ee->num_waiters; j++) { err_printf(m, " seqno 0x%08x for %s [%d]\n", - error->ring[i].waiters[j].seqno, - error->ring[i].waiters[j].comm, - error->ring[i].waiters[j].pid); + ee->waiters[j].seqno, + ee->waiters[j].comm, + ee->waiters[j].pid); } } - if ((obj = error->ring[i].ringbuffer)) { + if ((obj = ee->ringbuffer)) { err_printf(m, "%s --- ringbuffer = 0x%08x\n", dev_priv->engine[i].name, lower_32_bits(obj->gtt_offset)); print_error_obj(m, obj); } - if ((obj = error->ring[i].hws_page)) { + if ((obj = ee->hws_page)) { u64 hws_offset = obj->gtt_offset; u32 *hws_page = &obj->pages[0][0]; @@ -504,7 +529,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - obj = error->ring[i].wa_ctx; + obj = ee->wa_ctx; if (obj) { u64 wa_ctx_offset = obj->gtt_offset; u32 
*wa_ctx_page = &obj->pages[0][0]; @@ -526,7 +551,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - if ((obj = error->ring[i].ctx)) { + if ((obj = ee->ctx)) { err_printf(m, "%s --- HW Context = 0x%08x\n", dev_priv->engine[i].name, lower_32_bits(obj->gtt_offset)); @@ -534,7 +559,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } - if ((obj = error->semaphore_obj)) { + if ((obj = error->semaphore)) { err_printf(m, "Semaphore page = 0x%08x\n", lower_32_bits(obj->gtt_offset)); for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { @@ -611,26 +636,26 @@ static void i915_error_state_free(struct kref *error_ref) typeof(*error), ref); int i; - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - i915_error_object_free(error->ring[i].batchbuffer); - i915_error_object_free(error->ring[i].wa_batchbuffer); - i915_error_object_free(error->ring[i].ringbuffer); - i915_error_object_free(error->ring[i].hws_page); - i915_error_object_free(error->ring[i].ctx); - i915_error_object_free(error->ring[i].wa_ctx); - kfree(error->ring[i].requests); - kfree(error->ring[i].waiters); + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + struct drm_i915_error_engine *ee = &error->engine[i]; + + i915_error_object_free(ee->batchbuffer); + i915_error_object_free(ee->wa_batchbuffer); + i915_error_object_free(ee->ringbuffer); + i915_error_object_free(ee->hws_page); + i915_error_object_free(ee->ctx); + i915_error_object_free(ee->wa_ctx); + + kfree(ee->requests); + kfree(ee->waiters); } - i915_error_object_free(error->semaphore_obj); + i915_error_object_free(error->semaphore); - for (i = 0; i < error->vm_count; i++) + for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) kfree(error->active_bo[i]); - - kfree(error->active_bo); - kfree(error->active_bo_count); kfree(error->pinned_bo); - kfree(error->pinned_bo_count); + kfree(error->overlay); kfree(error->display); kfree(error); @@ -638,46 +663,45 @@ static void i915_error_state_free(struct kref *error_ref) static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *src, - struct i915_address_space *vm) + struct i915_vma *vma) { struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_gem_object *src; struct drm_i915_error_object *dst; - struct i915_vma *vma = NULL; int num_pages; bool use_ggtt; int i = 0; u64 reloc_offset; - if (src == NULL || src->pages == NULL) + if (!vma) + return NULL; + + src = vma->obj; + if (!src->pages) return NULL; num_pages = src->base.size >> PAGE_SHIFT; dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); - if (dst == NULL) + if (!dst) return NULL; - if (i915_gem_obj_bound(src, vm)) - dst->gtt_offset = i915_gem_obj_offset(src, vm); - else - dst->gtt_offset = -1; + dst->gtt_offset = vma->node.start; + dst->gtt_size = vma->node.size; reloc_offset = dst->gtt_offset; - if (i915_is_ggtt(vm)) - vma = i915_gem_obj_to_ggtt(src); use_ggtt = (src->cache_level == I915_CACHE_NONE && - vma && (vma->bound & GLOBAL_BIND) && + (vma->flags & I915_VMA_GLOBAL_BIND) && reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end); /* Cannot access stolen address directly, try to use the aperture */ if (src->stolen) { use_ggtt = true; - if (!(vma && vma->bound & GLOBAL_BIND)) + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) goto unwind; - reloc_offset = i915_gem_obj_ggtt_offset(src); + reloc_offset = vma->node.start; if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end) goto unwind; } @@ -705,7 +729,7 @@ i915_error_object_create(struct 
drm_i915_private *dev_priv, * captures what the GPU read. */ - s = io_mapping_map_atomic_wc(ggtt->mappable, + s = io_mapping_map_atomic_wc(&ggtt->mappable, reloc_offset); memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); @@ -737,8 +761,24 @@ unwind: kfree(dst); return NULL; } -#define i915_error_ggtt_object_create(dev_priv, src) \ - i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base) + +/* The error capture is special as tries to run underneath the normal + * locking rules - so we use the raw version of the i915_gem_active lookup. + */ +static inline uint32_t +__active_get_seqno(struct i915_gem_active *active) +{ + return i915_gem_request_get_seqno(__i915_gem_active_peek(active)); +} + +static inline int +__active_get_engine_id(struct i915_gem_active *active) +{ + struct intel_engine_cs *engine; + + engine = i915_gem_request_get_engine(__i915_gem_active_peek(active)); + return engine ? engine->id : -1; +} static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) @@ -748,32 +788,34 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->size = obj->base.size; err->name = obj->base.name; + for (i = 0; i < I915_NUM_ENGINES; i++) - err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]); - err->wseqno = i915_gem_request_get_seqno(obj->last_write_req); + err->rseqno[i] = __active_get_seqno(&obj->last_read[i]); + err->wseqno = __active_get_seqno(&obj->last_write); + err->engine = __active_get_engine_id(&obj->last_write); + err->gtt_offset = vma->node.start; err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (i915_gem_obj_is_pinned(obj)) - err->pinned = 1; - err->tiling = obj->tiling_mode; + err->fence_reg = vma->fence ? vma->fence->id : -1; + err->tiling = i915_gem_object_get_tiling(obj); err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; err->userptr = obj->userptr.mm != NULL; - err->ring = obj->last_write_req ? - i915_gem_request_get_engine(obj->last_write_req)->id : -1; err->cache_level = obj->cache_level; } -static u32 capture_active_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) +static u32 capture_error_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head, + bool pinned_only) { struct i915_vma *vma; int i = 0; list_for_each_entry(vma, head, vm_link) { + if (pinned_only && !i915_vma_is_pinned(vma)) + continue; + capture_bo(err++, vma); if (++i == count) break; @@ -782,28 +824,6 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, return i; } -static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head, - struct i915_address_space *vm) -{ - struct drm_i915_gem_object *obj; - struct drm_i915_error_buffer * const first = err; - struct drm_i915_error_buffer * const last = err + count; - - list_for_each_entry(obj, head, global_list) { - struct i915_vma *vma; - - if (err == last) - break; - - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == vm && vma->pin_count > 0) - capture_bo(err++, vma); - } - - return err - first; -} - /* Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by * grossly estimating a GPU error state. 
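capture_error_bo() above copies at most count entries out of a VMA list, optionally keeping only pinned ones, so the captured error state stays bounded however long the list grows. A standalone sketch of that bounded, filtered capture over a plain array; the record types and the pinned flag are stand-ins for the driver's structures.

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vma {
        unsigned long start;
        bool pinned;
    };

    struct toy_err {
        unsigned long start;
    };

    /* Copy at most 'count' entries, optionally restricted to pinned VMAs.
     * Returns how many records were actually captured. */
    static int capture(struct toy_err *err, int count,
                       const struct toy_vma *vmas, int nvma, bool pinned_only)
    {
        int captured = 0;

        for (int i = 0; i < nvma && captured < count; i++) {
            if (pinned_only && !vmas[i].pinned)
                continue;
            err[captured++].start = vmas[i].start;
        }
        return captured;
    }

    int main(void)
    {
        const struct toy_vma vmas[] = {
            { 0x1000, true }, { 0x2000, false }, { 0x3000, true },
        };
        struct toy_err err[2];
        int n;

        n = capture(err, 2, vmas, 3, true);
        printf("captured %d pinned VMAs, first at 0x%lx\n", n, err[0].start);
        return 0;
    }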
@@ -815,7 +835,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, */ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, - int *ring_id) + int *engine_id) { uint32_t error_code = 0; int i; @@ -826,11 +846,11 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, * strictly a client bug. Use instdone to differentiate those some. */ for (i = 0; i < I915_NUM_ENGINES; i++) { - if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) { - if (ring_id) - *ring_id = i; + if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) { + if (engine_id) + *engine_id = i; - return error->ring[i].ipehr ^ error->ring[i].instdone; + return error->engine[i].ipehr ^ error->engine[i].instdone; } } @@ -855,22 +875,17 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv, } -static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error, +static void gen8_record_semaphore_state(struct drm_i915_error_state *error, struct intel_engine_cs *engine, - struct drm_i915_error_ring *ering) + struct drm_i915_error_engine *ee) { + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *to; enum intel_engine_id id; - if (!i915_semaphore_is_enabled(dev_priv)) + if (!error->semaphore) return; - if (!error->semaphore_obj) - error->semaphore_obj = - i915_error_ggtt_object_create(dev_priv, - dev_priv->semaphore_obj); - for_each_engine_id(to, dev_priv, id) { int idx; u16 signal_offset; @@ -879,42 +894,43 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, if (engine == to) continue; - signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) - / 4; - tmp = error->semaphore_obj->pages[0]; - idx = intel_ring_sync_index(engine, to); + signal_offset = + (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4; + tmp = error->semaphore->pages[0]; + idx = intel_engine_sync_index(engine, to); - ering->semaphore_mboxes[idx] = tmp[signal_offset]; - ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx]; + ee->semaphore_mboxes[idx] = tmp[signal_offset]; + ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx]; } } -static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, - struct intel_engine_cs *engine, - struct drm_i915_error_ring *ering) +static void gen6_record_semaphore_state(struct intel_engine_cs *engine, + struct drm_i915_error_engine *ee) { - ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base)); - ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base)); - ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0]; - ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1]; + struct drm_i915_private *dev_priv = engine->i915; + + ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base)); + ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base)); + ee->semaphore_seqno[0] = engine->semaphore.sync_seqno[0]; + ee->semaphore_seqno[1] = engine->semaphore.sync_seqno[1]; if (HAS_VEBOX(dev_priv)) { - ering->semaphore_mboxes[2] = + ee->semaphore_mboxes[2] = I915_READ(RING_SYNC_2(engine->mmio_base)); - ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2]; + ee->semaphore_seqno[2] = engine->semaphore.sync_seqno[2]; } } -static void engine_record_waiters(struct intel_engine_cs *engine, - struct drm_i915_error_ring *ering) +static void error_record_engine_waiters(struct intel_engine_cs *engine, + struct drm_i915_error_engine *ee) { 
struct intel_breadcrumbs *b = &engine->breadcrumbs; struct drm_i915_error_waiter *waiter; struct rb_node *rb; int count; - ering->num_waiters = 0; - ering->waiters = NULL; + ee->num_waiters = 0; + ee->waiters = NULL; spin_lock(&b->lock); count = 0; @@ -930,7 +946,7 @@ static void engine_record_waiters(struct intel_engine_cs *engine, if (!waiter) return; - ering->waiters = waiter; + ee->waiters = waiter; spin_lock(&b->lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { @@ -941,55 +957,57 @@ static void engine_record_waiters(struct intel_engine_cs *engine, waiter->seqno = w->seqno; waiter++; - if (++ering->num_waiters == count) + if (++ee->num_waiters == count) break; } spin_unlock(&b->lock); } -static void i915_record_ring_state(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error, - struct intel_engine_cs *engine, - struct drm_i915_error_ring *ering) +static void error_record_engine_registers(struct drm_i915_error_state *error, + struct intel_engine_cs *engine, + struct drm_i915_error_engine *ee) { + struct drm_i915_private *dev_priv = engine->i915; + if (INTEL_GEN(dev_priv) >= 6) { - ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); - ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); + ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); + ee->fault_reg = I915_READ(RING_FAULT_REG(engine)); if (INTEL_GEN(dev_priv) >= 8) - gen8_record_semaphore_state(dev_priv, error, engine, - ering); + gen8_record_semaphore_state(error, engine, ee); else - gen6_record_semaphore_state(dev_priv, engine, ering); + gen6_record_semaphore_state(engine, ee); } if (INTEL_GEN(dev_priv) >= 4) { - ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); - ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); - ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); - ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); - ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); + ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); + ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); + ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); + ee->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); + ee->instps = I915_READ(RING_INSTPS(engine->mmio_base)); + ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); if (INTEL_GEN(dev_priv) >= 8) { - ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; - ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; + ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; + ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } - ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base)); + ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base)); } else { - ering->faddr = I915_READ(DMA_FADD_I8XX); - ering->ipeir = I915_READ(IPEIR); - ering->ipehr = I915_READ(IPEHR); - ering->instdone = I915_READ(GEN2_INSTDONE); + ee->faddr = I915_READ(DMA_FADD_I8XX); + ee->ipeir = I915_READ(IPEIR); + ee->ipehr = I915_READ(IPEHR); + ee->instdone = I915_READ(GEN2_INSTDONE); } - ering->waiting = intel_engine_has_waiter(engine); - ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); - ering->acthd = intel_ring_get_active_head(engine); - ering->seqno = intel_engine_get_seqno(engine); - ering->last_seqno = engine->last_submitted_seqno; - ering->start = I915_READ_START(engine); - ering->head = I915_READ_HEAD(engine); - ering->tail = I915_READ_TAIL(engine); - ering->ctl = 
I915_READ_CTL(engine); + ee->waiting = intel_engine_has_waiter(engine); + ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); + ee->acthd = intel_engine_get_active_head(engine); + ee->seqno = intel_engine_get_seqno(engine); + ee->last_seqno = engine->last_submitted_seqno; + ee->start = I915_READ_START(engine); + ee->head = I915_READ_HEAD(engine); + ee->tail = I915_READ_TAIL(engine); + ee->ctl = I915_READ_CTL(engine); + if (INTEL_GEN(dev_priv) > 2) + ee->mode = I915_READ_MODE(engine); if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; @@ -1017,107 +1035,150 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv, mmio = RING_HWS_PGA(engine->mmio_base); } - ering->hws = I915_READ(mmio); + ee->hws = I915_READ(mmio); } - ering->hangcheck_score = engine->hangcheck.score; - ering->hangcheck_action = engine->hangcheck.action; + ee->hangcheck_score = engine->hangcheck.score; + ee->hangcheck_action = engine->hangcheck.action; if (USES_PPGTT(dev_priv)) { int i; - ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); + ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); if (IS_GEN6(dev_priv)) - ering->vm_info.pp_dir_base = + ee->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(engine)); else if (IS_GEN7(dev_priv)) - ering->vm_info.pp_dir_base = + ee->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(engine)); else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { - ering->vm_info.pdp[i] = + ee->vm_info.pdp[i] = I915_READ(GEN8_RING_PDP_UDW(engine, i)); - ering->vm_info.pdp[i] <<= 32; - ering->vm_info.pdp[i] |= + ee->vm_info.pdp[i] <<= 32; + ee->vm_info.pdp[i] |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); } } } - -static void i915_gem_record_active_context(struct intel_engine_cs *engine, - struct drm_i915_error_state *error, - struct drm_i915_error_ring *ering) +static void engine_record_requests(struct intel_engine_cs *engine, + struct drm_i915_gem_request *first, + struct drm_i915_error_engine *ee) { - struct drm_i915_private *dev_priv = engine->i915; - struct drm_i915_gem_object *obj; + struct drm_i915_gem_request *request; + int count; + + count = 0; + request = first; + list_for_each_entry_from(request, &engine->request_list, link) + count++; + if (!count) + return; - /* Currently render ring is the only HW context user */ - if (engine->id != RCS || !error->ccid) + ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC); + if (!ee->requests) return; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!i915_gem_obj_ggtt_bound(obj)) - continue; + ee->num_requests = count; - if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { - ering->ctx = i915_error_ggtt_object_create(dev_priv, obj); + count = 0; + request = first; + list_for_each_entry_from(request, &engine->request_list, link) { + struct drm_i915_error_request *erq; + + if (count >= ee->num_requests) { + /* + * If the ring request list was changed in + * between the point where the error request + * list was created and dimensioned and this + * point then just exit early to avoid crashes. + * + * We don't need to communicate that the + * request list changed state during error + * state capture and that the error state is + * slightly incorrect as a consequence since we + * are typically only interested in the request + * list state at the point of error state + * capture, not in any changes happening during + * the capture. 
+ */ break; } + + erq = &ee->requests[count++]; + erq->seqno = request->fence.seqno; + erq->jiffies = request->emitted_jiffies; + erq->head = request->head; + erq->tail = request->tail; + + rcu_read_lock(); + erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0; + rcu_read_unlock(); } + ee->num_requests = count; } static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_i915_gem_request *request; - int i, count; + int i; + + error->semaphore = + i915_error_object_create(dev_priv, dev_priv->semaphore); for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = &dev_priv->engine[i]; + struct drm_i915_error_engine *ee = &error->engine[i]; + struct drm_i915_gem_request *request; - error->ring[i].pid = -1; + ee->pid = -1; + ee->engine_id = -1; if (!intel_engine_initialized(engine)) continue; - error->ring[i].valid = true; + ee->engine_id = i; - i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); - engine_record_waiters(engine, &error->ring[i]); + error_record_engine_registers(error, engine, ee); + error_record_engine_waiters(engine, ee); request = i915_gem_find_active_request(engine); if (request) { - struct i915_address_space *vm; - struct intel_ringbuffer *rb; + struct intel_ring *ring; + struct pid *pid; - vm = request->ctx->ppgtt ? + ee->vm = request->ctx->ppgtt ? &request->ctx->ppgtt->base : &ggtt->base; /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten * by userspace. */ - error->ring[i].batchbuffer = + ee->batchbuffer = i915_error_object_create(dev_priv, - request->batch_obj, - vm); + request->batch); if (HAS_BROKEN_CS_TLB(dev_priv)) - error->ring[i].wa_batchbuffer = - i915_error_ggtt_object_create(dev_priv, - engine->scratch.obj); + ee->wa_batchbuffer = + i915_error_object_create(dev_priv, + engine->scratch); - if (request->pid) { + ee->ctx = + i915_error_object_create(dev_priv, + request->ctx->engine[i].state); + + pid = request->ctx->pid; + if (pid) { struct task_struct *task; rcu_read_lock(); - task = pid_task(request->pid, PIDTYPE_PID); + task = pid_task(pid, PIDTYPE_PID); if (task) { - strcpy(error->ring[i].comm, task->comm); - error->ring[i].pid = task->pid; + strcpy(ee->comm, task->comm); + ee->pid = task->pid; } rcu_read_unlock(); } @@ -1125,153 +1186,106 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, error->simulated |= request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE; - rb = request->ringbuf; - error->ring[i].cpu_ring_head = rb->head; - error->ring[i].cpu_ring_tail = rb->tail; - error->ring[i].ringbuffer = - i915_error_ggtt_object_create(dev_priv, - rb->obj); - } - - error->ring[i].hws_page = - i915_error_ggtt_object_create(dev_priv, - engine->status_page.obj); - - if (engine->wa_ctx.obj) { - error->ring[i].wa_ctx = - i915_error_ggtt_object_create(dev_priv, - engine->wa_ctx.obj); - } - - i915_gem_record_active_context(engine, error, &error->ring[i]); + ring = request->ring; + ee->cpu_ring_head = ring->head; + ee->cpu_ring_tail = ring->tail; + ee->ringbuffer = + i915_error_object_create(dev_priv, ring->vma); - count = 0; - list_for_each_entry(request, &engine->request_list, list) - count++; - - error->ring[i].num_requests = count; - error->ring[i].requests = - kcalloc(count, sizeof(*error->ring[i].requests), - GFP_ATOMIC); - if (error->ring[i].requests == NULL) { - error->ring[i].num_requests = 0; - continue; + engine_record_requests(engine, request, ee); } - count = 
0; - list_for_each_entry(request, &engine->request_list, list) { - struct drm_i915_error_request *erq; - - if (count >= error->ring[i].num_requests) { - /* - * If the ring request list was changed in - * between the point where the error request - * list was created and dimensioned and this - * point then just exit early to avoid crashes. - * - * We don't need to communicate that the - * request list changed state during error - * state capture and that the error state is - * slightly incorrect as a consequence since we - * are typically only interested in the request - * list state at the point of error state - * capture, not in any changes happening during - * the capture. - */ - break; - } + ee->hws_page = + i915_error_object_create(dev_priv, + engine->status_page.vma); - erq = &error->ring[i].requests[count++]; - erq->seqno = request->seqno; - erq->jiffies = request->emitted_jiffies; - erq->tail = request->postfix; - } + ee->wa_ctx = + i915_error_object_create(dev_priv, engine->wa_ctx.vma); } } -/* FIXME: Since pin count/bound list is global, we duplicate what we capture per - * VM. - */ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct i915_address_space *vm, - const int ndx) + int idx) { - struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL; - struct drm_i915_gem_object *obj; + struct drm_i915_error_buffer *active_bo; struct i915_vma *vma; - int i; + int count; - i = 0; + count = 0; list_for_each_entry(vma, &vm->active_list, vm_link) - i++; - error->active_bo_count[ndx] = i; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == vm && vma->pin_count > 0) - i++; - } - error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; - - if (i) { - active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC); - if (active_bo) - pinned_bo = active_bo + error->active_bo_count[ndx]; - } + count++; + active_bo = NULL; + if (count) + active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC); if (active_bo) - error->active_bo_count[ndx] = - capture_active_bo(active_bo, - error->active_bo_count[ndx], - &vm->active_list); - - if (pinned_bo) - error->pinned_bo_count[ndx] = - capture_pinned_bo(pinned_bo, - error->pinned_bo_count[ndx], - &dev_priv->mm.bound_list, vm); - error->active_bo[ndx] = active_bo; - error->pinned_bo[ndx] = pinned_bo; + count = capture_error_bo(active_bo, count, &vm->active_list, false); + else + count = 0; + + error->active_vm[idx] = vm; + error->active_bo[idx] = active_bo; + error->active_bo_count[idx] = count; } -static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, - struct drm_i915_error_state *error) +static void i915_capture_active_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) { - struct i915_address_space *vm; - int cnt = 0, i = 0; - - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - cnt++; - - error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); - error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); - error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), - GFP_ATOMIC); - error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), - GFP_ATOMIC); - - if (error->active_bo == NULL || - error->pinned_bo == NULL || - error->active_bo_count == NULL || - error->pinned_bo_count == NULL) { - kfree(error->active_bo); - kfree(error->active_bo_count); - kfree(error->pinned_bo); - 
kfree(error->pinned_bo_count); - - error->active_bo = NULL; - error->active_bo_count = NULL; - error->pinned_bo = NULL; - error->pinned_bo_count = NULL; - } else { - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - i915_gem_capture_vm(dev_priv, error, vm, i++); + int cnt = 0, i, j; + + BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo)); + BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm)); + BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count)); - error->vm_count = cnt; + /* Scan each engine looking for unique active contexts/vm */ + for (i = 0; i < ARRAY_SIZE(error->engine); i++) { + struct drm_i915_error_engine *ee = &error->engine[i]; + bool found; + + if (!ee->vm) + continue; + + found = false; + for (j = 0; j < i && !found; j++) + found = error->engine[j].vm == ee->vm; + if (!found) + i915_gem_capture_vm(dev_priv, error, ee->vm, cnt++); } } +static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct i915_address_space *vm = &dev_priv->ggtt.base; + struct drm_i915_error_buffer *bo; + struct i915_vma *vma; + int count_inactive, count_active; + + count_inactive = 0; + list_for_each_entry(vma, &vm->active_list, vm_link) + count_inactive++; + + count_active = 0; + list_for_each_entry(vma, &vm->inactive_list, vm_link) + count_active++; + + bo = NULL; + if (count_inactive + count_active) + bo = kcalloc(count_inactive + count_active, + sizeof(*bo), GFP_ATOMIC); + if (!bo) + return; + + count_inactive = capture_error_bo(bo, count_inactive, + &vm->active_list, true); + count_active = capture_error_bo(bo + count_inactive, count_active, + &vm->inactive_list, true); + error->pinned_bo_count = count_inactive + count_active; + error->pinned_bo = bo; +} + /* Capture all registers which don't fit into another category. 
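The i915_capture_active_buffers() scan above captures each address space only the first time it is seen, since several engines may have been running from the same VM. Here is a userspace-style sketch of that dedupe-by-earlier-entries idea; the array size, the capture_vm() stub and the pointer-as-VM representation are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 5

void capture_vm(const void *vm, int slot)
{
	printf("capturing vm %p into slot %d\n", vm, slot);
}

void capture_unique_vms(const void *engine_vm[NUM_ENGINES])
{
	int captured = 0;

	for (int i = 0; i < NUM_ENGINES; i++) {
		bool seen = false;

		if (!engine_vm[i])
			continue;	/* this engine recorded no active VM */

		for (int j = 0; j < i && !seen; j++)
			seen = engine_vm[j] == engine_vm[i];

		if (!seen)		/* first engine using this VM */
			capture_vm(engine_vm[i], captured++);
	}
}

int main(void)
{
	int a, b;			/* two fake address spaces */
	const void *engine_vm[NUM_ENGINES] = { &a, &b, &a, NULL, &b };

	capture_unique_vms(engine_vm);	/* captures &a and &b once each */
	return 0;
}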
*/ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) @@ -1352,20 +1366,20 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv, const char *error_msg) { u32 ecode; - int ring_id = -1, len; + int engine_id = -1, len; - ecode = i915_error_generate_code(dev_priv, error, &ring_id); + ecode = i915_error_generate_code(dev_priv, error, &engine_id); len = scnprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%d:0x%08x", - INTEL_GEN(dev_priv), ring_id, ecode); + INTEL_GEN(dev_priv), engine_id, ecode); - if (ring_id != -1 && error->ring[ring_id].pid != -1) + if (engine_id != -1 && error->engine[engine_id].pid != -1) len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", - error->ring[ring_id].comm, - error->ring[ring_id].pid); + error->engine[engine_id].comm, + error->engine[engine_id].pid); scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", reason: %s, action: %s", @@ -1382,6 +1396,10 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, #endif error->reset_count = i915_reset_count(&dev_priv->gpu_error); error->suspend_count = dev_priv->suspend_count; + + memcpy(&error->device_info, + INTEL_INFO(dev_priv), + sizeof(error->device_info)); } /** @@ -1415,9 +1433,10 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); - i915_gem_capture_buffers(dev_priv, error); i915_gem_record_fences(dev_priv, error); i915_gem_record_rings(dev_priv, error); + i915_capture_active_buffers(dev_priv, error); + i915_capture_pinned_buffers(dev_priv, error); do_gettimeofday(&error->time); diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 2112e029db6a..e4369411f07d 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -183,7 +183,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc, struct i915_guc_client *client, u16 new_id) { - struct sg_table *sg = guc->ctx_pool_obj->pages; + struct sg_table *sg = guc->ctx_pool_vma->pages; void *doorbell_bitmap = guc->doorbell_bitmap; struct guc_doorbell_info *doorbell; struct guc_context_desc desc; @@ -325,7 +325,6 @@ static void guc_init_proc_desc(struct intel_guc *guc, static void guc_init_ctx_desc(struct intel_guc *guc, struct i915_guc_client *client) { - struct drm_i915_gem_object *client_obj = client->client_obj; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; struct i915_gem_context *ctx = client->owner; @@ -340,10 +339,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_engine(engine, dev_priv) { + for_each_engine_masked(engine, dev_priv, client->engines) { struct intel_context *ce = &ctx->engine[engine->id]; - struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; - struct drm_i915_gem_object *obj; + uint32_t guc_engine_id = engine->guc_id; + struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id]; /* TODO: We have a design issue to be solved here. 
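guc_init_ctx_desc() above now initialises execlist contexts only for the engines selected in client->engines, using for_each_engine_masked(). The sketch below shows the basic idea of walking the set bits of such a mask in plain C; the engine-name table and bit assignments are illustrative assumptions, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

static const char *const fake_engine_name[] = {
	"rcs", "bcs", "vcs", "vcs2", "vecs",	/* illustrative ordering */
};

void for_each_set_engine(uint32_t mask)
{
	while (mask) {
		unsigned int id = __builtin_ctz(mask);	/* lowest set bit */

		if (id >= sizeof(fake_engine_name) / sizeof(fake_engine_name[0]))
			break;				/* unknown engine bit */

		printf("initialising descriptor for %s (bit %u)\n",
		       fake_engine_name[id], id);

		mask &= mask - 1;			/* clear that bit */
	}
}

Restricting the loop to the client's engine mask keeps the descriptor's engines_used field consistent with the contexts that were actually initialised.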
Only when we * receive the first batch, we know which engine is used by the @@ -358,30 +357,29 @@ static void guc_init_ctx_desc(struct intel_guc *guc, lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - gfx_addr = i915_gem_obj_ggtt_offset(ce->state); - lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; + lrc->ring_lcra = + i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | - (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - - obj = ce->ringbuf->obj; - gfx_addr = i915_gem_obj_ggtt_offset(obj); + (guc_engine_id << GUC_ELC_ENGINE_OFFSET); - lrc->ring_begin = gfx_addr; - lrc->ring_end = gfx_addr + obj->base.size - 1; - lrc->ring_next_free_location = gfx_addr; + lrc->ring_begin = i915_ggtt_offset(ce->ring->vma); + lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; + lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << engine->guc_id); + desc.engines_used |= (1 << guc_engine_id); } + DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", + client->engines, desc.engines_used); WARN_ON(desc.engines_used == 0); /* * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT */ - gfx_addr = i915_gem_obj_ggtt_offset(client_obj); - desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) + + gfx_addr = i915_ggtt_offset(client->vma); + desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + client->doorbell_offset; desc.db_trigger_cpu = (uintptr_t)client->client_base + client->doorbell_offset; @@ -397,7 +395,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.desc_private = (uintptr_t)client; /* Pool context is pinned already */ - sg = guc->ctx_pool_obj->pages; + sg = guc->ctx_pool_vma->pages; sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), sizeof(desc) * client->ctx_index); } @@ -410,7 +408,7 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, memset(&desc, 0, sizeof(desc)); - sg = guc->ctx_pool_obj->pages; + sg = guc->ctx_pool_vma->pages; sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), sizeof(desc) * client->ctx_index); } @@ -457,6 +455,7 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc, /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); const u32 wqi_len = wqi_size/sizeof(u32) - 1; + struct intel_engine_cs *engine = rq->engine; struct guc_process_desc *desc; struct guc_wq_item *wqi; void *base; @@ -492,21 +491,20 @@ static void guc_add_workqueue_item(struct i915_guc_client *gc, /* WQ starts from the page after doorbell / process_desc */ wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT; wq_off &= PAGE_SIZE - 1; - base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page)); + base = kmap_atomic(i915_gem_object_get_page(gc->vma->obj, wq_page)); wqi = (struct guc_wq_item *)((char *)base + wq_off); /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | (wqi_len << WQ_LEN_SHIFT) | - (rq->engine->guc_id << WQ_TARGET_SHIFT) | + (engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, - rq->engine); + wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; - wqi->fence_id = rq->seqno; + wqi->fence_id = 
rq->fence.seqno; kunmap_atomic(base); } @@ -585,7 +583,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc) * The only error here arises if the doorbell hardware isn't functioning * as expected, which really shouln't happen. */ -int i915_guc_submit(struct drm_i915_gem_request *rq) +static void i915_guc_submit(struct drm_i915_gem_request *rq) { unsigned int engine_id = rq->engine->id; struct intel_guc *guc = &rq->i915->guc; @@ -601,9 +599,7 @@ int i915_guc_submit(struct drm_i915_gem_request *rq) client->b_fail += 1; guc->submissions[engine_id] += 1; - guc->last_seqno[engine_id] = rq->seqno; - - return b_ret; + guc->last_seqno[engine_id] = rq->fence.seqno; } /* @@ -613,55 +609,48 @@ int i915_guc_submit(struct drm_i915_gem_request *rq) */ /** - * gem_allocate_guc_obj() - Allocate gem object for GuC usage - * @dev_priv: driver private data structure - * @size: size of object + * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage + * @guc: the guc + * @size: size of area to allocate (both virtual space and memory) * - * This is a wrapper to create a gem obj. In order to use it inside GuC, the - * object needs to be pinned lifetime. Also we must pin it to gtt space other - * than [0, GUC_WOPCM_TOP) because this range is reserved inside GuC. + * This is a wrapper to create an object for use with the GuC. In order to + * use it inside the GuC, an object needs to be pinned lifetime, so we allocate + * both some backing storage and a range inside the Global GTT. We must pin + * it in the GGTT somewhere other than than [0, GUC_WOPCM_TOP) because that + * range is reserved inside GuC. * - * Return: A drm_i915_gem_object if successful, otherwise NULL. + * Return: A i915_vma if successful, otherwise an ERR_PTR. */ -static struct drm_i915_gem_object * -gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size) +static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) - return NULL; + return ERR_CAST(obj); - if (i915_gem_object_get_pages(obj)) { - drm_gem_object_unreference(&obj->base); - return NULL; - } + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err; - if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) { - drm_gem_object_unreference(&obj->base); - return NULL; + ret = i915_vma_pin(vma, 0, PAGE_SIZE, + PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + if (ret) { + vma = ERR_PTR(ret); + goto err; } /* Invalidate GuC TLB to let GuC take the latest updates to GTT. 
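guc_allocate_vma() above reports failure through ERR_PTR()/ERR_CAST() and funnels cleanup through a single error label. A compact userspace-style sketch of that return-and-unwind convention follows; the ERR_PTR helpers are re-implemented here purely for illustration and the buffer type is made up.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct buf {
	void *storage;
};

struct buf *alloc_pinned_buf(size_t size)
{
	struct buf *b = malloc(sizeof(*b));
	int ret;

	if (!b)
		return ERR_PTR(-ENOMEM);

	b->storage = malloc(size);
	if (!b->storage) {
		ret = -ENOMEM;
		goto err;			/* single unwind point */
	}

	return b;

err:
	free(b);
	return ERR_PTR(ret);
}

Callers then check IS_ERR() and convert with PTR_ERR(), which is how the GuC setup code in this patch consumes the returned vma.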
*/ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); - return obj; -} - -/** - * gem_release_guc_obj() - Release gem object allocated for GuC usage - * @obj: gem obj to be released - */ -static void gem_release_guc_obj(struct drm_i915_gem_object *obj) -{ - if (!obj) - return; - - if (i915_gem_obj_is_pinned(obj)) - i915_gem_object_ggtt_unpin(obj); + return vma; - drm_gem_object_unreference(&obj->base); +err: + i915_gem_object_put(obj); + return vma; } static void @@ -688,7 +677,7 @@ guc_client_free(struct drm_i915_private *dev_priv, kunmap(kmap_to_page(client->client_base)); } - gem_release_guc_obj(client->client_obj); + i915_vma_unpin_and_release(&client->vma); if (client->ctx_index != GUC_INVALID_CTX_ID) { guc_fini_ctx_desc(guc, client); @@ -698,29 +687,47 @@ guc_client_free(struct drm_i915_private *dev_priv, kfree(client); } +/* Check that a doorbell register is in the expected state */ +static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + i915_reg_t drbreg = GEN8_DRBREGL(db_id); + uint32_t value = I915_READ(drbreg); + bool enabled = (value & GUC_DOORBELL_ENABLED) != 0; + bool expected = test_bit(db_id, guc->doorbell_bitmap); + + if (enabled == expected) + return true; + + DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n", + db_id, drbreg.reg, value, + expected ? "active" : "inactive"); + + return false; +} + /* - * Borrow the first client to set up & tear down every doorbell + * Borrow the first client to set up & tear down each unused doorbell * in turn, to ensure that all doorbell h/w is (re)initialised. */ static void guc_init_doorbell_hw(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); struct i915_guc_client *client = guc->execbuf_client; - uint16_t db_id, i; - int err; + uint16_t db_id; + int i, err; + /* Save client's original doorbell selection */ db_id = client->doorbell_id; for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { - i915_reg_t drbreg = GEN8_DRBREGL(i); - u32 value = I915_READ(drbreg); + /* Skip if doorbell is OK */ + if (guc_doorbell_check(guc, i)) + continue; err = guc_update_doorbell_id(guc, client, i); - - /* Report update failure or unexpectedly active doorbell */ - if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED))) - DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n", - i, drbreg.reg, value, err); + if (err) + DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n", + i, err); } /* Restore to original value */ @@ -729,20 +736,15 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) DRM_ERROR("Failed to restore doorbell to %d, err %d\n", db_id, err); - for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { - i915_reg_t drbreg = GEN8_DRBREGL(i); - u32 value = I915_READ(drbreg); - - if (i != db_id && (value & GUC_DOORBELL_ENABLED)) - DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n", - i, drbreg.reg, value); - - } + /* Read back & verify all doorbell registers */ + for (i = 0; i < GUC_MAX_DOORBELLS; ++i) + (void)guc_doorbell_check(guc, i); } /** * guc_client_alloc() - Allocate an i915_guc_client * @dev_priv: driver private data structure + * @engines: The set of engines to enable for this client * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. 
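guc_doorbell_check() above compares the driver's doorbell_bitmap bookkeeping against the enable bit the hardware register actually reports, and guc_init_doorbell_hw() only reprograms the doorbells that fail that check. A userspace-style sketch of the same consistency test over a fake register file; every name and size below is a stand-in.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_DOORBELL_ENABLED	(1u << 0)
#define BITS_PER_LONG		(8 * sizeof(unsigned long))

bool doorbell_consistent(const uint32_t *regs, const unsigned long *bitmap,
			 unsigned int id)
{
	bool enabled = regs[id] & FAKE_DOORBELL_ENABLED;
	bool expected = bitmap[id / BITS_PER_LONG] & (1ul << (id % BITS_PER_LONG));

	if (enabled == expected)
		return true;

	fprintf(stderr, "doorbell %u: hw %s, driver expects %s\n", id,
		enabled ? "enabled" : "disabled",
		expected ? "enabled" : "disabled");
	return false;
}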
Priority of a client for scheduler can be HIGH, @@ -754,22 +756,24 @@ static void guc_init_doorbell_hw(struct intel_guc *guc) */ static struct i915_guc_client * guc_client_alloc(struct drm_i915_private *dev_priv, + uint32_t engines, uint32_t priority, struct i915_gem_context *ctx) { struct i915_guc_client *client; struct intel_guc *guc = &dev_priv->guc; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; uint16_t db_id; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) return NULL; - client->doorbell_id = GUC_INVALID_DOORBELL_ID; - client->priority = priority; client->owner = ctx; client->guc = guc; + client->engines = engines; + client->priority = priority; + client->doorbell_id = GUC_INVALID_DOORBELL_ID; client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0, GUC_MAX_GPU_CONTEXTS, GFP_KERNEL); @@ -779,13 +783,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv, } /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE); - if (!obj) + vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); + if (IS_ERR(vma)) goto err; /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */ - client->client_obj = obj; - client->client_base = kmap(i915_gem_object_get_page(obj, 0)); + client->vma = vma; + client->client_base = kmap(i915_vma_first_page(vma)); client->wq_offset = GUC_DB_SIZE; client->wq_size = GUC_WQ_SIZE; @@ -811,8 +815,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv, if (guc_init_doorbell(guc, client, db_id)) goto err; - DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n", - priority, client, client->ctx_index); + DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", + priority, client, client->engines, client->ctx_index); DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", client->doorbell_id, client->doorbell_offset); @@ -827,8 +831,7 @@ err: static void guc_create_log(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_i915_gem_object *obj; + struct i915_vma *vma; unsigned long offset; uint32_t size, flags; @@ -844,16 +847,16 @@ static void guc_create_log(struct intel_guc *guc) GUC_LOG_ISR_PAGES + 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; - obj = guc->log_obj; - if (!obj) { - obj = gem_allocate_guc_obj(dev_priv, size); - if (!obj) { + vma = guc->log_vma; + if (!vma) { + vma = guc_allocate_vma(guc, size); + if (IS_ERR(vma)) { /* logging will be off */ i915.guc_log_level = -1; return; } - guc->log_obj = obj; + guc->log_vma = vma; } /* each allocated unit is a page */ @@ -862,7 +865,7 @@ static void guc_create_log(struct intel_guc *guc) (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); - offset = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; /* in pages */ + offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; } @@ -891,7 +894,7 @@ static void init_guc_policies(struct guc_policies *policies) static void guc_create_ads(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_i915_gem_object *obj; + struct i915_vma *vma; struct guc_ads *ads; struct guc_policies *policies; struct guc_mmio_reg_state *reg_state; @@ -904,16 +907,16 @@ static void guc_create_ads(struct intel_guc *guc) sizeof(struct guc_mmio_reg_state) + GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE; - obj = guc->ads_obj; - if (!obj) { - obj = gem_allocate_guc_obj(dev_priv, 
PAGE_ALIGN(size)); - if (!obj) + vma = guc->ads_vma; + if (!vma) { + vma = guc_allocate_vma(guc, PAGE_ALIGN(size)); + if (IS_ERR(vma)) return; - guc->ads_obj = obj; + guc->ads_vma = vma; } - page = i915_gem_object_get_page(obj, 0); + page = i915_vma_first_page(vma); ads = kmap(page); /* @@ -924,7 +927,7 @@ static void guc_create_ads(struct intel_guc *guc) * to find it. */ engine = &dev_priv->engine[RCS]; - ads->golden_context_lrca = engine->status_page.gfx_addr; + ads->golden_context_lrca = engine->status_page.ggtt_offset; for_each_engine(engine, dev_priv) ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine); @@ -933,8 +936,8 @@ static void guc_create_ads(struct intel_guc *guc) policies = (void *)ads + sizeof(struct guc_ads); init_guc_policies(policies); - ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) + - sizeof(struct guc_ads); + ads->scheduler_policies = + i915_ggtt_offset(vma) + sizeof(struct guc_ads); /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); @@ -962,10 +965,9 @@ static void guc_create_ads(struct intel_guc *guc) */ int i915_guc_submission_init(struct drm_i915_private *dev_priv) { - const size_t ctxsize = sizeof(struct guc_context_desc); - const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; - const size_t gemsize = round_up(poolsize, PAGE_SIZE); struct intel_guc *guc = &dev_priv->guc; + struct i915_vma *vma; + u32 size; /* Wipe bitmap & delete client in case of reinitialisation */ bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); @@ -974,13 +976,15 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv) if (!i915.enable_guc_submission) return 0; /* not enabled */ - if (guc->ctx_pool_obj) + if (guc->ctx_pool_vma) return 0; /* already allocated */ - guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize); - if (!guc->ctx_pool_obj) - return -ENOMEM; + size = PAGE_ALIGN(GUC_MAX_GPU_CONTEXTS*sizeof(struct guc_context_desc)); + vma = guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + guc->ctx_pool_vma = vma; ida_init(&guc->ctx_ids); guc_create_log(guc); guc_create_ads(guc); @@ -992,9 +996,11 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; struct i915_guc_client *client; + struct intel_engine_cs *engine; /* client for execbuf submission */ client = guc_client_alloc(dev_priv, + INTEL_INFO(dev_priv)->ring_mask, GUC_CTX_PRIORITY_KMD_NORMAL, dev_priv->kernel_context); if (!client) { @@ -1006,6 +1012,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) host2guc_sample_forcewake(guc, client); guc_init_doorbell_hw(guc); + /* Take over from manual control of ELSP (execlists) */ + for_each_engine(engine, dev_priv) + engine->submit_request = i915_guc_submit; + return 0; } @@ -1013,6 +1023,12 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; + if (!guc->execbuf_client) + return; + + /* Revert back to manual ELSP submission */ + intel_execlists_enable_submission(dev_priv); + guc_client_free(dev_priv, guc->execbuf_client); guc->execbuf_client = NULL; } @@ -1021,16 +1037,12 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; - gem_release_guc_obj(dev_priv->guc.ads_obj); - guc->ads_obj = NULL; - - gem_release_guc_obj(dev_priv->guc.log_obj); - guc->log_obj = NULL; + i915_vma_unpin_and_release(&guc->ads_vma); + i915_vma_unpin_and_release(&guc->log_vma); - if (guc->ctx_pool_obj) + if 
(guc->ctx_pool_vma) ida_destroy(&guc->ctx_ids); - gem_release_guc_obj(guc->ctx_pool_obj); - guc->ctx_pool_obj = NULL; + i915_vma_unpin_and_release(&guc->ctx_pool_vma); } /** @@ -1053,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev) /* any value greater than GUC_POWER_D0 */ data[1] = GUC_POWER_D1; /* first page is shared data with GuC */ - data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); + data[2] = i915_ggtt_offset(ctx->engine[RCS].state); return host2guc_action(guc, data, ARRAY_SIZE(data)); } @@ -1078,7 +1090,7 @@ int intel_guc_resume(struct drm_device *dev) data[0] = HOST2GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; /* first page is shared data with GuC */ - data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); + data[2] = i915_ggtt_offset(ctx->engine[RCS].state); return host2guc_action(guc, data, ARRAY_SIZE(data)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1c2aec392412..ebb83d5a448b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -656,12 +656,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) * of horizontal active on the first line of vertical active */ -static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) -{ - /* Gen2 doesn't have a hardware frame counter */ - return 0; -} - /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ @@ -978,10 +972,8 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) static void notify_ring(struct intel_engine_cs *engine) { smp_store_mb(engine->breadcrumbs.irq_posted, true); - if (intel_engine_wakeup(engine)) { + if (intel_engine_wakeup(engine)) trace_i915_gem_request_notify(engine); - engine->breadcrumbs.irq_wakeups++; - } } static void vlv_c0_read(struct drm_i915_private *dev_priv, @@ -1105,9 +1097,10 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay = dev_priv->rps.cur_freq; min = dev_priv->rps.min_freq_softlimit; max = dev_priv->rps.max_freq_softlimit; - - if (client_boost) { - new_delay = dev_priv->rps.max_freq_softlimit; + if (client_boost || any_waiters(dev_priv)) + max = dev_priv->rps.max_freq; + if (client_boost && new_delay < dev_priv->rps.boost_freq) { + new_delay = dev_priv->rps.boost_freq; adj = 0; } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) @@ -1122,7 +1115,7 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay = dev_priv->rps.efficient_freq; adj = 0; } - } else if (any_waiters(dev_priv)) { + } else if (client_boost || any_waiters(dev_priv)) { adj = 0; } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) @@ -2804,13 +2797,6 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) } static bool -ring_idle(struct intel_engine_cs *engine, u32 seqno) -{ - return i915_seqno_passed(seqno, - READ_ONCE(engine->last_submitted_seqno)); -} - -static bool ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) { if (INTEL_GEN(engine->i915) >= 8) { @@ -2859,6 +2845,7 @@ static struct intel_engine_cs * semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { struct drm_i915_private *dev_priv = engine->i915; + void __iomem *vaddr; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2897,6 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) */ head = I915_READ_HEAD(engine) & HEAD_ADDR; backwards = (INTEL_GEN(dev_priv) >= 8) ? 
5 : 4; + vaddr = (void __iomem *)engine->buffer->vaddr; for (i = backwards; i; --i) { /* @@ -2907,7 +2895,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) head &= engine->buffer->size - 1; /* This here seems to blow up */ - cmd = ioread32(engine->buffer->virtual_start + head); + cmd = ioread32(vaddr + head); if (cmd == ipehr) break; @@ -2917,11 +2905,11 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) if (!i) return NULL; - *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; + *seqno = ioread32(vaddr + head + 4) + 1; if (INTEL_GEN(dev_priv) >= 8) { - offset = ioread32(engine->buffer->virtual_start + head + 12); + offset = ioread32(vaddr + head + 12); offset <<= 32; - offset = ioread32(engine->buffer->virtual_start + head + 8); + offset |= ioread32(vaddr + head + 8); } return semaphore_wait_to_signaller_ring(engine, ipehr, offset); } @@ -2990,7 +2978,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) return stuck; } -static enum intel_ring_hangcheck_action +static enum intel_engine_hangcheck_action head_stuck(struct intel_engine_cs *engine, u64 acthd) { if (acthd != engine->hangcheck.acthd) { @@ -3008,11 +2996,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } -static enum intel_ring_hangcheck_action -ring_stuck(struct intel_engine_cs *engine, u64 acthd) +static enum intel_engine_hangcheck_action +engine_stuck(struct intel_engine_cs *engine, u64 acthd) { struct drm_i915_private *dev_priv = engine->i915; - enum intel_ring_hangcheck_action ha; + enum intel_engine_hangcheck_action ha; u32 tmp; ha = head_stuck(engine, acthd); @@ -3054,22 +3042,6 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) return HANGCHECK_HUNG; } -static unsigned long kick_waiters(struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = engine->i915; - unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); - - if (engine->hangcheck.user_interrupts == irq_count && - !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { - if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) - DRM_ERROR("Hangcheck timer elapsed... %s idle\n", - engine->name); - - intel_engine_enable_fake_irq(engine); - } - - return irq_count; -} /* * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. We keep track per ring seqno progress and @@ -3107,7 +3079,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) bool busy = intel_engine_has_waiter(engine); u64 acthd; u32 seqno; - unsigned user_interrupts; semaphore_clear_deadlocks(dev_priv); @@ -3121,29 +3092,25 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (engine->irq_seqno_barrier) engine->irq_seqno_barrier(engine); - acthd = intel_ring_get_active_head(engine); + acthd = intel_engine_get_active_head(engine); seqno = intel_engine_get_seqno(engine); - /* Reset stuck interrupts between batch advances */ - user_interrupts = 0; - if (engine->hangcheck.seqno == seqno) { - if (ring_idle(engine, seqno)) { + if (!intel_engine_is_active(engine)) { engine->hangcheck.action = HANGCHECK_IDLE; if (busy) { /* Safeguard against driver failure */ - user_interrupts = kick_waiters(engine); engine->hangcheck.score += BUSY; } } else { /* We always increment the hangcheck score - * if the ring is busy and still processing + * if the engine is busy and still processing * the same request, so that no single request * can run indefinitely (such as a chain of * batches). 
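The semaphore_waits_for() scan above steps a few dwords backwards from the current ring HEAD, and the head &= size - 1 wrap only works because the ring size is a power of two. Below is a userspace-style sketch of that backwards scan with mask-based wrap-around; the ring size, search value and window are made-up parameters.

#include <stdint.h>

#define FAKE_RING_SIZE	4096u		/* bytes; must be a power of two */

int find_cmd_backwards(const uint32_t *ring, uint32_t head,
		       uint32_t wanted, int max_steps)
{
	while (max_steps--) {
		head -= 4;			/* one dword back */
		head &= FAKE_RING_SIZE - 1;	/* wrap around the ring */

		if (ring[head / 4] == wanted)
			return head;		/* byte offset of the match */
	}

	return -1;		/* not found within the search window */
}

The driver reads the ring through an __iomem mapping with ioread32(), which the sketch replaces with a plain array access.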
The only time we do not increment * the hangcheck score on this ring, if this - * ring is in a legitimate wait for another - * ring. In that case the waiting ring is a + * engine is in a legitimate wait for another + * engine. In that case the waiting engine is a * victim and we want to be sure we catch the * right culprit. Then every time we do kick * the ring, add a small increment to the @@ -3151,8 +3118,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work) * being repeatedly kicked and so responsible * for stalling the machine. */ - engine->hangcheck.action = ring_stuck(engine, - acthd); + engine->hangcheck.action = + engine_stuck(engine, acthd); switch (engine->hangcheck.action) { case HANGCHECK_IDLE: @@ -3195,7 +3162,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) engine->hangcheck.seqno = seqno; engine->hangcheck.acthd = acthd; - engine->hangcheck.user_interrupts = user_interrupts; busy_count += busy; } @@ -4542,8 +4508,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) i915_hangcheck_elapsed); if (IS_GEN2(dev_priv)) { + /* Gen2 doesn't have a hardware frame counter */ dev->max_vblank_count = 0; - dev->driver->get_vblank_counter = i8xx_get_vblank_counter; + dev->driver->get_vblank_counter = drm_vblank_no_hw_counter; } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = g4x_get_vblank_counter; diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c new file mode 100644 index 000000000000..49a079494b68 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -0,0 +1,101 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include <linux/kernel.h> +#include <asm/fpu/api.h> + +#include "i915_drv.h" + +static DEFINE_STATIC_KEY_FALSE(has_movntdqa); + +#ifdef CONFIG_AS_MOVNTDQA +static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) +{ + kernel_fpu_begin(); + + len >>= 4; + while (len >= 4) { + asm("movntdqa (%0), %%xmm0\n" + "movntdqa 16(%0), %%xmm1\n" + "movntdqa 32(%0), %%xmm2\n" + "movntdqa 48(%0), %%xmm3\n" + "movaps %%xmm0, (%1)\n" + "movaps %%xmm1, 16(%1)\n" + "movaps %%xmm2, 32(%1)\n" + "movaps %%xmm3, 48(%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 64; + dst += 64; + len -= 4; + } + while (len--) { + asm("movntdqa (%0), %%xmm0\n" + "movaps %%xmm0, (%1)\n" + :: "r" (src), "r" (dst) : "memory"); + src += 16; + dst += 16; + } + + kernel_fpu_end(); +} +#endif + +/** + * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC + * @dst: destination pointer + * @src: source pointer + * @len: how many bytes to copy + * + * i915_memcpy_from_wc copies @len bytes from @src to @dst using + * non-temporal instructions where available. Note that all arguments + * (@src, @dst) must be aligned to 16 bytes and @len must be a multiple + * of 16. + * + * To test whether accelerated reads from WC are supported, use + * i915_memcpy_from_wc(NULL, NULL, 0); + * + * Returns true if the copy was successful, false if the preconditions + * are not met. + */ +bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len) +{ + if (unlikely(((unsigned long)dst | (unsigned long)src | len) & 15)) + return false; + +#ifdef CONFIG_AS_MOVNTDQA + if (static_branch_likely(&has_movntdqa)) { + if (likely(len)) + __memcpy_ntdqa(dst, src, len); + return true; + } +#endif + + return false; +} + +void i915_memcpy_init_early(struct drm_i915_private *dev_priv) +{ + if (static_cpu_has(X86_FEATURE_XMM4_1)) + static_branch_enable(&has_movntdqa); +} diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c new file mode 100644 index 000000000000..e4935dd1fd37 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -0,0 +1,84 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
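The new i915_memcpy_from_wc() above only accepts 16-byte-aligned pointers and lengths that are a multiple of 16, and returns false when those preconditions fail or the CPU lacks MOVNTDQA; i915_memcpy_from_wc(NULL, NULL, 0) doubles as a pure support probe. The fragment below sketches how a caller might wrap it with a plain memcpy() fallback; the wrapper and its names are illustrative, not part of this patch, and assume the driver's usual headers.

static void copy_wc_or_fallback(void *dst, const void *wc_src, unsigned long len)
{
	/*
	 * The accelerated path refuses misaligned or unsupported copies by
	 * returning false, so falling back to memcpy() keeps the result
	 * correct in every case (just slower on uncached sources).
	 */
	if (i915_memcpy_from_wc(dst, wc_src, len))
		return;

	memcpy(dst, wc_src, len);
}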
+ * + */ + +#include <linux/mm.h> +#include <linux/io-mapping.h> + +#include <asm/pgtable.h> + +#include "i915_drv.h" + +struct remap_pfn { + struct mm_struct *mm; + unsigned long pfn; + pgprot_t prot; +}; + +static int remap_pfn(pte_t *pte, pgtable_t token, + unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + +/** + * remap_io_mapping - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @iomap: the source io_mapping + * + * Note: this is only safe if the mm semaphore is held when called. + */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + struct remap_pfn r; + int err; + + GEM_BUG_ON((vma->vm_flags & + (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)) != + (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)); + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + r.mm = vma->vm_mm; + r.pfn = pfn; + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); + + err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index b6e404c91eed..768ad89d9cd4 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -45,6 +45,7 @@ struct i915_params i915 __read_mostly = { .fastboot = 0, .prefault_disable = 0, .load_detect_test = 0, + .force_reset_modeset_test = 0, .reset = true, .invert_brightness = 0, .disable_display = 0, @@ -161,6 +162,11 @@ MODULE_PARM_DESC(load_detect_test, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); +module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); +MODULE_PARM_DESC(force_reset_modeset_test, + "Force a modeset during gpu reset for testing (default:false). 
" + "For developers only."); + module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 0ad020b4a925..3a0dd78ddb38 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -57,6 +57,7 @@ struct i915_params { bool fastboot; bool prefault_disable; bool load_detect_test; + bool force_reset_modeset_test; bool reset; bool disable_display; bool verbose_state_checks; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 949c01686a66..2587b1bd41f4 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -173,6 +173,7 @@ static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, + .ring_mask = RENDER_RING, GEN_DEFAULT_PIPEOFFSETS, CURSOR_OFFSETS, }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce14fe09d962..d4adf2806c50 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -186,13 +186,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_GRDOM_GUC (1 << 5) #define GEN8_GRDOM_MEDIA2 (1 << 7) -#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228) -#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518) -#define RING_PP_DIR_DCLV(ring) _MMIO((ring)->mmio_base+0x220) +#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228) +#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518) +#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220) #define PP_DIR_DCLV_2G 0xffffffff -#define GEN8_RING_PDP_UDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8 + 4) -#define GEN8_RING_PDP_LDW(ring, n) _MMIO((ring)->mmio_base+0x270 + (n) * 8) +#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4) +#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8) #define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8) #define GEN8_RPCS_ENABLE (1 << 31) @@ -1536,6 +1536,7 @@ enum skl_disp_power_wells { #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) /* Balance leg disable bits */ #define BALANCE_LEG_DISABLE_SHIFT 23 +#define BALANCE_LEG_DISABLE(port) (1 << (23 + (port))) /* * Fence registers @@ -1647,7 +1648,7 @@ enum skl_disp_power_wells { #define ARB_MODE_BWGTLB_DISABLE (1<<9) #define ARB_MODE_SWIZZLE_BDW (1<<1) #define RENDER_HWS_PGA_GEN7 _MMIO(0x04080) -#define RING_FAULT_REG(ring) _MMIO(0x4094 + 0x100*(ring)->id) +#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id) #define RING_FAULT_GTTSEL_MASK (1<<11) #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) @@ -1845,7 +1846,7 @@ enum skl_disp_power_wells { #define GFX_MODE _MMIO(0x2520) #define GFX_MODE_GEN7 _MMIO(0x229c) -#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c) +#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c) #define GFX_RUN_LIST_ENABLE (1<<15) #define GFX_INTERRUPT_STEERING (1<<14) #define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) @@ -3659,8 +3660,17 @@ enum { #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) /* Panel power sequencing */ -#define PP_STATUS _MMIO(0x61200) -#define PP_ON (1 << 31) +#define PPS_BASE 0x61200 +#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) +#define PCH_PPS_BASE 0xC7200 + 
+#define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->pps_mmio_base - \ + PPS_BASE + (reg) + \ + (pps_idx) * 0x100) + +#define _PP_STATUS 0x61200 +#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) +#define PP_ON (1 << 31) /* * Indicates that all dependencies of the panel are on: * @@ -3668,14 +3678,14 @@ enum { * - pipe enabled * - LVDS/DVOB/DVOC on */ -#define PP_READY (1 << 30) -#define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_POWER_UP (1 << 28) -#define PP_SEQUENCE_POWER_DOWN (2 << 28) -#define PP_SEQUENCE_MASK (3 << 28) -#define PP_SEQUENCE_SHIFT 28 -#define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_MASK 0x0000000f +#define PP_READY (1 << 30) +#define PP_SEQUENCE_NONE (0 << 28) +#define PP_SEQUENCE_POWER_UP (1 << 28) +#define PP_SEQUENCE_POWER_DOWN (2 << 28) +#define PP_SEQUENCE_MASK (3 << 28) +#define PP_SEQUENCE_SHIFT 28 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) #define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) #define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) @@ -3685,11 +3695,46 @@ enum { #define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) #define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) #define PP_SEQUENCE_STATE_RESET (0xf << 0) -#define PP_CONTROL _MMIO(0x61204) -#define POWER_TARGET_ON (1 << 0) -#define PP_ON_DELAYS _MMIO(0x61208) -#define PP_OFF_DELAYS _MMIO(0x6120c) -#define PP_DIVISOR _MMIO(0x61210) + +#define _PP_CONTROL 0x61204 +#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) +#define PANEL_UNLOCK_REGS (0xabcd << 16) +#define PANEL_UNLOCK_MASK (0xffff << 16) +#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0 +#define BXT_POWER_CYCLE_DELAY_SHIFT 4 +#define EDP_FORCE_VDD (1 << 3) +#define EDP_BLC_ENABLE (1 << 2) +#define PANEL_POWER_RESET (1 << 1) +#define PANEL_POWER_OFF (0 << 0) +#define PANEL_POWER_ON (1 << 0) + +#define _PP_ON_DELAYS 0x61208 +#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) +#define PANEL_PORT_SELECT_SHIFT 30 +#define PANEL_PORT_SELECT_MASK (3 << 30) +#define PANEL_PORT_SELECT_LVDS (0 << 30) +#define PANEL_PORT_SELECT_DPA (1 << 30) +#define PANEL_PORT_SELECT_DPC (2 << 30) +#define PANEL_PORT_SELECT_DPD (3 << 30) +#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) +#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000 +#define PANEL_POWER_UP_DELAY_SHIFT 16 +#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff +#define PANEL_LIGHT_ON_DELAY_SHIFT 0 + +#define _PP_OFF_DELAYS 0x6120C +#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) +#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000 +#define PANEL_POWER_DOWN_DELAY_SHIFT 16 +#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff +#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 + +#define _PP_DIVISOR 0x61210 +#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) +#define PP_REFERENCE_DIVIDER_MASK 0xffffff00 +#define PP_REFERENCE_DIVIDER_SHIFT 8 +#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f +#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 /* Panel fitting */ #define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230) @@ -6132,6 +6177,7 @@ enum { # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) @@ -6748,77 +6794,6 @@ enum { #define PCH_LVDS _MMIO(0xe1180) #define LVDS_DETECTED (1 << 1) -/* vlv has 2 sets of panel control regs. 
*/ -#define _PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) -#define _PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) -#define _PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) -#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) -#define _PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) -#define _PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) - -#define _PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) -#define _PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) -#define _PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) -#define _PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) -#define _PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) - -#define VLV_PIPE_PP_STATUS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_STATUS, _PIPEB_PP_STATUS) -#define VLV_PIPE_PP_CONTROL(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_CONTROL, _PIPEB_PP_CONTROL) -#define VLV_PIPE_PP_ON_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_ON_DELAYS, _PIPEB_PP_ON_DELAYS) -#define VLV_PIPE_PP_OFF_DELAYS(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_OFF_DELAYS, _PIPEB_PP_OFF_DELAYS) -#define VLV_PIPE_PP_DIVISOR(pipe) _MMIO_PIPE(pipe, _PIPEA_PP_DIVISOR, _PIPEB_PP_DIVISOR) - -#define _PCH_PP_STATUS 0xc7200 -#define _PCH_PP_CONTROL 0xc7204 -#define PANEL_UNLOCK_REGS (0xabcd << 16) -#define PANEL_UNLOCK_MASK (0xffff << 16) -#define BXT_POWER_CYCLE_DELAY_MASK (0x1f0) -#define BXT_POWER_CYCLE_DELAY_SHIFT 4 -#define EDP_FORCE_VDD (1 << 3) -#define EDP_BLC_ENABLE (1 << 2) -#define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_OFF (0 << 0) -#define PANEL_POWER_ON (1 << 0) -#define _PCH_PP_ON_DELAYS 0xc7208 -#define PANEL_PORT_SELECT_MASK (3 << 30) -#define PANEL_PORT_SELECT_LVDS (0 << 30) -#define PANEL_PORT_SELECT_DPA (1 << 30) -#define PANEL_PORT_SELECT_DPC (2 << 30) -#define PANEL_PORT_SELECT_DPD (3 << 30) -#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_UP_DELAY_SHIFT 16 -#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_ON_DELAY_SHIFT 0 - -#define _PCH_PP_OFF_DELAYS 0xc720c -#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) -#define PANEL_POWER_DOWN_DELAY_SHIFT 16 -#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) -#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 - -#define _PCH_PP_DIVISOR 0xc7210 -#define PP_REFERENCE_DIVIDER_MASK (0xffffff00) -#define PP_REFERENCE_DIVIDER_SHIFT 8 -#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) -#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 - -#define PCH_PP_STATUS _MMIO(_PCH_PP_STATUS) -#define PCH_PP_CONTROL _MMIO(_PCH_PP_CONTROL) -#define PCH_PP_ON_DELAYS _MMIO(_PCH_PP_ON_DELAYS) -#define PCH_PP_OFF_DELAYS _MMIO(_PCH_PP_OFF_DELAYS) -#define PCH_PP_DIVISOR _MMIO(_PCH_PP_DIVISOR) - -/* BXT PPS changes - 2nd set of PPS registers */ -#define _BXT_PP_STATUS2 0xc7300 -#define _BXT_PP_CONTROL2 0xc7304 -#define _BXT_PP_ON_DELAYS2 0xc7308 -#define _BXT_PP_OFF_DELAYS2 0xc730c - -#define BXT_PP_STATUS(n) _MMIO_PIPE(n, _PCH_PP_STATUS, _BXT_PP_STATUS2) -#define BXT_PP_CONTROL(n) _MMIO_PIPE(n, _PCH_PP_CONTROL, _BXT_PP_CONTROL2) -#define BXT_PP_ON_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) -#define BXT_PP_OFF_DELAYS(n) _MMIO_PIPE(n, _PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) - #define _PCH_DP_B 0xe4100 #define PCH_DP_B _MMIO(_PCH_DP_B) #define _PCH_DPB_AUX_CH_CTL 0xe4110 @@ -6958,6 +6933,9 @@ enum { #define ECOBUS _MMIO(0xa180) #define FORCEWAKE_MT_ENABLE (1<<5) #define VLV_SPAREG2H _MMIO(0xA194) +#define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0) +#define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) +#define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) @@ -7058,12 
+7036,13 @@ enum { #define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) #define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) #define GEN6_RP_CUR_UP_EI _MMIO(0xA050) -#define GEN6_CURICONT_MASK 0xffffff +#define GEN6_RP_EI_MASK 0xffffff +#define GEN6_CURICONT_MASK GEN6_RP_EI_MASK #define GEN6_RP_CUR_UP _MMIO(0xA054) -#define GEN6_CURBSYTAVG_MASK 0xffffff +#define GEN6_CURBSYTAVG_MASK GEN6_RP_EI_MASK #define GEN6_RP_PREV_UP _MMIO(0xA058) #define GEN6_RP_CUR_DOWN_EI _MMIO(0xA05C) -#define GEN6_CURIAVG_MASK 0xffffff +#define GEN6_CURIAVG_MASK GEN6_RP_EI_MASK #define GEN6_RP_CUR_DOWN _MMIO(0xA060) #define GEN6_RP_PREV_DOWN _MMIO(0xA064) #define GEN6_RP_UP_EI _MMIO(0xA068) @@ -7485,6 +7464,7 @@ enum { #define _DDI_BUF_TRANS_A 0x64E00 #define _DDI_BUF_TRANS_B 0x64E60 #define DDI_BUF_TRANS_LO(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8) +#define DDI_BUF_BALANCE_LEG_ENABLE (1 << 31) #define DDI_BUF_TRANS_HI(port, i) _MMIO(_PORT(port, _DDI_BUF_TRANS_A, _DDI_BUF_TRANS_B) + (i) * 8 + 4) /* Sideband Interface (SBI) is programmed indirectly, via diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 5cfe4c7716b4..4f272777b4f4 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -37,25 +37,6 @@ static void i915_save_display(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 4) dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - /* LVDS state */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); - else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->regfile.saveLVDS = I915_READ(LVDS); - - /* Panel power sequencer */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else if (INTEL_INFO(dev)->gen <= 4) { - dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); - } - /* save FBC interval */ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); @@ -64,33 +45,11 @@ static void i915_save_display(struct drm_device *dev) static void i915_restore_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - u32 mask = 0xffffffff; /* Display arbitration */ if (INTEL_INFO(dev)->gen <= 4) I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - mask = ~LVDS_PORT_EN; - - /* LVDS state */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask); - else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask); - - /* Panel power sequencer */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } else if (INTEL_INFO(dev)->gen <= 4) { - I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, 
dev_priv->regfile.savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } - /* only restore FBC info on the platform that supports FBC*/ intel_fbc_global_disable(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d61829e54f93..f1ffde7f7c0b 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -271,8 +271,6 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct drm_i915_private *dev_priv = to_i915(dev); int ret; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); @@ -303,19 +301,46 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_gpu_freq(dev_priv, + dev_priv->rps.cur_freq)); +} - intel_runtime_pm_get(dev_priv); +static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_i915_private *dev_priv = to_i915(minor->dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_gpu_freq(dev_priv, + dev_priv->rps.boost_freq)); +} + +static ssize_t gt_boost_freq_mhz_store(struct device *kdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + u32 val; + ssize_t ret; + + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; + + /* Validate against (static) hardware limits */ + val = intel_freq_opcode(dev_priv, val); + if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) + return -EINVAL; mutex_lock(&dev_priv->rps.hw_lock); - ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq); + dev_priv->rps.boost_freq = val; mutex_unlock(&dev_priv->rps.hw_lock); - intel_runtime_pm_put(dev_priv); - - return snprintf(buf, PAGE_SIZE, "%d\n", ret); + return count; } static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, @@ -325,9 +350,9 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - return snprintf(buf, PAGE_SIZE, - "%d\n", - intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_gpu_freq(dev_priv, + dev_priv->rps.efficient_freq)); } static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) @@ -335,15 +360,10 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - - mutex_lock(&dev_priv->rps.hw_lock); - ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); - mutex_unlock(&dev_priv->rps.hw_lock); - return snprintf(buf, PAGE_SIZE, "%d\n", ret); + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_gpu_freq(dev_priv, + dev_priv->rps.max_freq_softlimit)); } static ssize_t gt_max_freq_mhz_store(struct device *kdev, @@ -360,8 +380,6 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - 
flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); @@ -403,15 +421,10 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_minor *minor = dev_to_drm_minor(kdev); struct drm_device *dev = minor->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - mutex_lock(&dev_priv->rps.hw_lock); - ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); - mutex_unlock(&dev_priv->rps.hw_lock); - - return snprintf(buf, PAGE_SIZE, "%d\n", ret); + return snprintf(buf, PAGE_SIZE, "%d\n", + intel_gpu_freq(dev_priv, + dev_priv->rps.min_freq_softlimit)); } static ssize_t gt_min_freq_mhz_store(struct device *kdev, @@ -428,8 +441,6 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); @@ -465,6 +476,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL); static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); +static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store); static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); @@ -498,6 +510,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr static const struct attribute *gen6_attrs[] = { &dev_attr_gt_act_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_boost_freq_mhz.attr, &dev_attr_gt_max_freq_mhz.attr, &dev_attr_gt_min_freq_mhz.attr, &dev_attr_gt_RP0_freq_mhz.attr, @@ -509,6 +522,7 @@ static const struct attribute *gen6_attrs[] = { static const struct attribute *vlv_attrs[] = { &dev_attr_gt_act_freq_mhz.attr, &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_boost_freq_mhz.attr, &dev_attr_gt_max_freq_mhz.attr, &dev_attr_gt_min_freq_mhz.attr, &dev_attr_gt_RP0_freq_mhz.attr, diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 534154e05fbe..178798002a73 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, ); TRACE_EVENT(i915_gem_evict, - TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags), - TP_ARGS(dev, size, align, flags), + TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags), + TP_ARGS(vm, size, align, flags), TP_STRUCT__entry( __field(u32, dev) + __field(struct i915_address_space *, vm) __field(u32, size) __field(u32, align) - __field(unsigned, flags) + __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = dev->primary->index; + __entry->dev = vm->dev->primary->index; + __entry->vm = vm; __entry->size = size; __entry->align = align; __entry->flags = flags; ), - TP_printk("dev=%d, size=%d, align=%d %s", - __entry->dev, __entry->size, __entry->align, + TP_printk("dev=%d, vm=%p, size=%d, align=%d %s", + __entry->dev, __entry->vm, __entry->size, __entry->align, __entry->flags & PIN_MAPPABLE ? 
", mappable" : "") ); @@ -449,10 +451,9 @@ TRACE_EVENT(i915_gem_evict_vm, ); TRACE_EVENT(i915_gem_ring_sync_to, - TP_PROTO(struct drm_i915_gem_request *to_req, - struct intel_engine_cs *from, - struct drm_i915_gem_request *req), - TP_ARGS(to_req, from, req), + TP_PROTO(struct drm_i915_gem_request *to, + struct drm_i915_gem_request *from), + TP_ARGS(to, from), TP_STRUCT__entry( __field(u32, dev) @@ -463,9 +464,9 @@ TRACE_EVENT(i915_gem_ring_sync_to, TP_fast_assign( __entry->dev = from->i915->drm.primary->index; - __entry->sync_from = from->id; - __entry->sync_to = to_req->engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->sync_from = from->engine->id; + __entry->sync_to = to->engine->id; + __entry->seqno = from->fence.seqno; ), TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", @@ -488,9 +489,9 @@ TRACE_EVENT(i915_gem_ring_dispatch, TP_fast_assign( __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; - __entry->seqno = req->seqno; + __entry->seqno = req->fence.seqno; __entry->flags = flags; - intel_engine_enable_signaling(req); + fence_enable_sw_signaling(&req->fence); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -533,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request, TP_fast_assign( __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; - __entry->seqno = req->seqno; + __entry->seqno = req->fence.seqno; ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -595,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin, TP_fast_assign( __entry->dev = req->i915->drm.primary->index; __entry->ring = req->engine->id; - __entry->seqno = req->seqno; + __entry->seqno = req->fence.seqno; __entry->blocking = mutex_is_locked(&req->i915->drm.struct_mutex); ), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index f6acb5a0e701..ca2e91259948 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -97,6 +97,7 @@ static struct _balloon_info_ bl_info; /** * intel_vgt_deballoon - deballoon reserved graphics address trunks + * @dev_priv: i915 device private data * * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. @@ -138,7 +139,7 @@ static int vgt_balloon_space(struct drm_mm *mm, /** * intel_vgt_balloon - balloon out reserved graphics address trunks - * @dev: drm device + * @dev_priv: i915 device private data * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as @@ -155,27 +156,27 @@ static int vgt_balloon_space(struct drm_mm *mm, * host point of view, the graphic address space is partitioned by multiple * vGPUs in different VMs. 
:: * - * vGPU1 view Host view - * 0 ------> +-----------+ +-----------+ - * ^ |###########| | vGPU3 | - * | |###########| +-----------+ - * | |###########| | vGPU2 | - * | +-----------+ +-----------+ - * mappable GM | available | ==> | vGPU1 | - * | +-----------+ +-----------+ - * | |###########| | | - * v |###########| | Host | - * +=======+===========+ +===========+ - * ^ |###########| | vGPU3 | - * | |###########| +-----------+ - * | |###########| | vGPU2 | - * | +-----------+ +-----------+ - * unmappable GM | available | ==> | vGPU1 | - * | +-----------+ +-----------+ - * | |###########| | | - * | |###########| | Host | - * v |###########| | | - * total GM size ------> +-----------+ +-----------+ + * vGPU1 view Host view + * 0 ------> +-----------+ +-----------+ + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | + * | +-----------+ +-----------+ + * mappable GM | available | ==> | vGPU1 | + * | +-----------+ +-----------+ + * | |###########| | | + * v |###########| | Host | + * +=======+===========+ +===========+ + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | + * | +-----------+ +-----------+ + * unmappable GM | available | ==> | vGPU1 | + * | +-----------+ +-----------+ + * | |###########| | | + * | |###########| | Host | + * v |###########| | | + * total GM size ------> +-----------+ +-----------+ * * Returns: * zero on success, non-zero if configuration invalid or ballooning failed diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 7de7721f65bc..b82de3072d4f 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -55,7 +55,7 @@ intel_create_plane_state(struct drm_plane *plane) return NULL; state->base.plane = plane; - state->base.rotation = BIT(DRM_ROTATE_0); + state->base.rotation = DRM_ROTATE_0; state->ckey.flags = I915_SET_COLORKEY_NONE; return state; @@ -134,20 +134,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane, crtc_state = to_intel_crtc_state(drm_crtc_state); - /* - * The original src/dest coordinates are stored in state->base, but - * we want to keep another copy internal to our driver that we can - * clip/modify ourselves. - */ - intel_state->src.x1 = state->src_x; - intel_state->src.y1 = state->src_y; - intel_state->src.x2 = state->src_x + state->src_w; - intel_state->src.y2 = state->src_y + state->src_h; - intel_state->dst.x1 = state->crtc_x; - intel_state->dst.y1 = state->crtc_y; - intel_state->dst.x2 = state->crtc_x + state->crtc_w; - intel_state->dst.y2 = state->crtc_y + state->crtc_h; - /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */ intel_state->clip.x1 = 0; intel_state->clip.y1 = 0; @@ -157,6 +143,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane, crtc_state->base.enable ? 
crtc_state->pipe_src_h : 0; if (state->fb && intel_rotation_90_or_270(state->rotation)) { + char *format_name; if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) { DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n"); @@ -171,8 +158,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, switch (state->fb->pixel_format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: - DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", - drm_get_format_name(state->fb->pixel_format)); + format_name = drm_get_format_name(state->fb->pixel_format); + DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", format_name); + kfree(format_name); return -EINVAL; default: @@ -180,7 +168,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane, } } - intel_state->visible = false; + intel_state->base.visible = false; ret = intel_plane->check_plane(plane, crtc_state, intel_state); if (ret) return ret; @@ -196,7 +184,7 @@ static void intel_plane_atomic_update(struct drm_plane *plane, to_intel_plane_state(plane->state); struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; - if (intel_state->visible) + if (intel_state->base.visible) intel_plane->update_plane(plane, to_intel_crtc_state(crtc->state), intel_state); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 6700a7be7f78..85389cdd0bec 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -51,10 +51,10 @@ * related registers. (The notable exception is the power management, not * covered here.) * - * The struct i915_audio_component is used to interact between the graphics - * and audio drivers. The struct i915_audio_component_ops *ops in it is + * The struct &i915_audio_component is used to interact between the graphics + * and audio drivers. The struct &i915_audio_component_ops @ops in it is * defined in graphics driver and called in audio driver. The - * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver. + * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. */ static const struct { @@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) return; + i915_audio_component_get_power(dev); + /* * Enable/disable generating the codec wake signal, overriding the * internal logic to generate the codec wake to controller. @@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, I915_WRITE(HSW_AUD_CHICKENBIT, tmp); usleep_range(1000, 1500); } + + i915_audio_component_put_power(dev); } /* Get CDCLK in kHz */ @@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, !IS_HASWELL(dev_priv)) return 0; + i915_audio_component_get_power(dev); mutex_lock(&dev_priv->av_mutex); /* 1. 
get the pipe */ intel_encoder = dev_priv->dig_port_map[port]; @@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, unlock: mutex_unlock(&dev_priv->av_mutex); + i915_audio_component_put_power(dev); return err; } diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index b074f3d6d127..2491e4c1eaf0 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -26,6 +26,40 @@ #include "i915_drv.h" +static void intel_breadcrumbs_hangcheck(unsigned long data) +{ + struct intel_engine_cs *engine = (struct intel_engine_cs *)data; + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + if (!b->irq_enabled) + return; + + if (time_before(jiffies, b->timeout)) { + mod_timer(&b->hangcheck, b->timeout); + return; + } + + DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name); + set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); + mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); + + /* Ensure that even if the GPU hangs, we get woken up. + * + * However, note that if no one is waiting, we never notice + * a gpu hang. Eventually, we will have to wait for a resource + * held by the GPU and so trigger a hangcheck. In the most + * pathological case, this will be upon memory starvation! To + * prevent this, we also queue the hangcheck from the retire + * worker. + */ + i915_queue_hangcheck(engine->i915); +} + +static unsigned long wait_timeout(void) +{ + return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES); +} + static void intel_breadcrumbs_fake_irq(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; @@ -37,10 +71,8 @@ static void intel_breadcrumbs_fake_irq(unsigned long data) * every jiffie in order to kick the oldest waiter to do the * coherent seqno check. */ - rcu_read_lock(); if (intel_engine_wakeup(engine)) mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); - rcu_read_unlock(); } static void irq_enable(struct intel_engine_cs *engine) @@ -91,17 +123,13 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) } if (!b->irq_enabled || - test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) + test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { mod_timer(&b->fake_irq, jiffies + 1); - - /* Ensure that even if the GPU hangs, we get woken up. - * - * However, note that if no one is waiting, we never notice - * a gpu hang. Eventually, we will have to wait for a resource - * held by the GPU and so trigger a hangcheck. In the most - * pathological case, this will be upon memory starvation! 
- */ - i915_queue_hangcheck(i915); + } else { + /* Ensure we never sleep indefinitely */ + GEM_BUG_ON(!time_after(b->timeout, jiffies)); + mod_timer(&b->hangcheck, b->timeout); + } } static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) @@ -204,7 +232,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, } rb_link_node(&wait->node, parent, p); rb_insert_color(&wait->node, &b->waiters); - GEM_BUG_ON(!first && !b->irq_seqno_bh); + GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh)); if (completed) { struct rb_node *next = rb_next(completed); @@ -212,8 +240,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, GEM_BUG_ON(!next && !first); if (next && next != &wait->node) { GEM_BUG_ON(first); + b->timeout = wait_timeout(); b->first_wait = to_wait(next); - smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk); /* As there is a delay between reading the current * seqno, processing the completed tasks and selecting * the next waiter, we may have missed the interrupt @@ -238,8 +267,9 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, if (first) { GEM_BUG_ON(rb_first(&b->waiters) != &wait->node); + b->timeout = wait_timeout(); b->first_wait = wait; - smp_store_mb(b->irq_seqno_bh, wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, wait->tsk); /* After assigning ourselves as the new bottom-half, we must * perform a cursory check to prevent a missed interrupt. * Either we miss the interrupt whilst programming the hardware, @@ -250,7 +280,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine, */ __intel_breadcrumbs_enable_irq(b); } - GEM_BUG_ON(!b->irq_seqno_bh); + GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh)); GEM_BUG_ON(!b->first_wait); GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node); @@ -270,11 +300,6 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine, return first; } -void intel_engine_enable_fake_irq(struct intel_engine_cs *engine) -{ - mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1); -} - static inline bool chain_wakeup(struct rb_node *rb, int priority) { return rb && to_wait(rb)->tsk->prio <= priority; @@ -310,7 +335,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, const int priority = wakeup_priority(b, wait->tsk); struct rb_node *next; - GEM_BUG_ON(b->irq_seqno_bh != wait->tsk); + GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk); /* We are the current bottom-half. Find the next candidate, * the first waiter in the queue on the remaining oldest @@ -352,14 +377,15 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, * the interrupt, or if we have to handle an * exception rather than a seqno completion. */ + b->timeout = wait_timeout(); b->first_wait = to_wait(next); - smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk); + rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk); if (b->first_wait->seqno != wait->seqno) __intel_breadcrumbs_enable_irq(b); - wake_up_process(b->irq_seqno_bh); + wake_up_process(b->first_wait->tsk); } else { b->first_wait = NULL; - WRITE_ONCE(b->irq_seqno_bh, NULL); + rcu_assign_pointer(b->irq_seqno_bh, NULL); __intel_breadcrumbs_disable_irq(b); } } else { @@ -373,7 +399,7 @@ out_unlock: GEM_BUG_ON(b->first_wait == wait); GEM_BUG_ON(rb_first(&b->waiters) != (b->first_wait ? 
&b->first_wait->node : NULL)); - GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters)); + GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters)); spin_unlock(&b->lock); } @@ -436,6 +462,7 @@ static int intel_breadcrumbs_signaler(void *arg) */ intel_engine_remove_wait(engine, &request->signaling.wait); + fence_signal(&request->fence); /* Find the next oldest signal. Note that as we have * not been holding the lock, another client may @@ -452,7 +479,7 @@ static int intel_breadcrumbs_signaler(void *arg) rb_erase(&request->signaling.node, &b->signals); spin_unlock(&b->lock); - i915_gem_request_unreference(request); + i915_gem_request_put(request); } else { if (kthread_should_stop()) break; @@ -472,18 +499,14 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) struct rb_node *parent, **p; bool first, wakeup; - if (unlikely(READ_ONCE(request->signaling.wait.tsk))) - return; - - spin_lock(&b->lock); - if (unlikely(request->signaling.wait.tsk)) { - wakeup = false; - goto unlock; - } + /* locked by fence_enable_sw_signaling() */ + assert_spin_locked(&request->lock); request->signaling.wait.tsk = b->signaler; - request->signaling.wait.seqno = request->seqno; - i915_gem_request_reference(request); + request->signaling.wait.seqno = request->fence.seqno; + i915_gem_request_get(request); + + spin_lock(&b->lock); /* First add ourselves into the list of waiters, but register our * bottom-half as the signaller thread. As per usual, only the oldest @@ -504,8 +527,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) p = &b->signals.rb_node; while (*p) { parent = *p; - if (i915_seqno_passed(request->seqno, - to_signaler(parent)->seqno)) { + if (i915_seqno_passed(request->fence.seqno, + to_signaler(parent)->fence.seqno)) { p = &parent->rb_right; first = false; } else { @@ -517,7 +540,6 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request) if (first) smp_store_mb(b->first_signal, request); -unlock: spin_unlock(&b->lock); if (wakeup) @@ -533,6 +555,9 @@ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) setup_timer(&b->fake_irq, intel_breadcrumbs_fake_irq, (unsigned long)engine); + setup_timer(&b->hangcheck, + intel_breadcrumbs_hangcheck, + (unsigned long)engine); /* Spawn a thread to provide a common bottom-half for all signals. * As this is an asynchronous interface we cannot steal the current @@ -557,6 +582,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) if (!IS_ERR_OR_NULL(b->signaler)) kthread_stop(b->signaler); + del_timer_sync(&b->hangcheck); del_timer_sync(&b->fake_irq); } @@ -570,11 +596,9 @@ unsigned int intel_kick_waiters(struct drm_i915_private *i915) * RCU lock, i.e. as we call wake_up_process() we must be holding the * rcu_read_lock(). */ - rcu_read_lock(); for_each_engine(engine, i915) if (unlikely(intel_engine_wakeup(engine))) mask |= intel_engine_flag(engine); - rcu_read_unlock(); return mask; } diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 3edb9580928e..fb27d187876c 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -32,13 +32,6 @@ * onwards to drive newly added DMC (Display microcontroller) in display * engine to save and restore the state of display engine when it enter into * low-power state and comes back to normal. - * - * Firmware loading status will be one of the below states: FW_UNINITIALIZED, - * FW_LOADED, FW_FAILED. 
- * - * Once the firmware is written into the registers status will be moved from - * FW_UNINITIALIZED to FW_LOADED and for any erroneous condition status will - * be moved to FW_FAILED. */ #define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dd1d6fe12297..c2df4e429b19 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { { 0x0000201B, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x0 }, + { 0x80007011, 0x000000CD, 0x1 }, { 0x80009010, 0x000000C0, 0x1 }, { 0x0000201B, 0x0000009D, 0x0 }, { 0x80005012, 0x000000C0, 0x1 }, @@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { { 0x00000018, 0x000000A2, 0x0 }, { 0x00005012, 0x00000088, 0x0 }, - { 0x80007011, 0x000000CD, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, { 0x80009010, 0x000000C0, 0x3 }, { 0x00000018, 0x0000009D, 0x0 }, { 0x80005012, 0x000000C0, 0x3 }, @@ -301,45 +301,34 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */ }; -static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type); - -static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, - struct intel_digital_port **dig_port, - enum port *port) +enum port intel_ddi_get_encoder_port(struct intel_encoder *encoder) { - struct drm_encoder *encoder = &intel_encoder->base; - - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_DP_MST: - *dig_port = enc_to_mst(encoder)->primary; - *port = (*dig_port)->port; - break; - default: - WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); - /* fallthrough and treat as unknown */ + return enc_to_mst(&encoder->base)->primary->port; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_UNKNOWN: - *dig_port = enc_to_dig_port(encoder); - *port = (*dig_port)->port; - break; + return enc_to_dig_port(&encoder->base)->port; case INTEL_OUTPUT_ANALOG: - *dig_port = NULL; - *port = PORT_E; - break; + return PORT_E; + default: + MISSING_CASE(encoder->type); + return PORT_A; } } -enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) +static const struct ddi_buf_trans * +bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { - struct intel_digital_port *dig_port; - enum port port; - - ddi_get_encoder_port(intel_encoder, &dig_port, &port); - - return port; + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + return bdw_ddi_translations_edp; + } else { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + return bdw_ddi_translations_dp; + } } static const struct ddi_buf_trans * @@ -388,39 +377,58 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) } } +static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) +{ + int n_hdmi_entries; + int hdmi_level; + int hdmi_default_entry; + + hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + + if (IS_BROXTON(dev_priv)) + return hdmi_level; + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); + hdmi_default_entry = 8; + } else if 
(IS_BROADWELL(dev_priv)) { + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + hdmi_default_entry = 7; + } else if (IS_HASWELL(dev_priv)) { + n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); + hdmi_default_entry = 6; + } else { + WARN(1, "ddi translation table missing\n"); + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + hdmi_default_entry = 7; + } + + /* Choose a good default if VBT is badly populated */ + if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || + hdmi_level >= n_hdmi_entries) + hdmi_level = hdmi_default_entry; + + return hdmi_level; +} + /* * Starting with Haswell, DDI port buffers must be programmed with correct - * values in advance. The buffer values are different for FDI and DP modes, - * but the HDMI/DVI fields are shared among those. So we program the DDI - * in either FDI or DP modes only, as HDMI connections will work with both - * of those + * values in advance. This function programs the correct values for + * DP/eDP/FDI use cases. */ -void intel_prepare_ddi_buffer(struct intel_encoder *encoder) +void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; - int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, - size; - int hdmi_level; - enum port port; + int i, n_dp_entries, n_edp_entries, size; + enum port port = intel_ddi_get_encoder_port(encoder); const struct ddi_buf_trans *ddi_translations_fdi; const struct ddi_buf_trans *ddi_translations_dp; const struct ddi_buf_trans *ddi_translations_edp; - const struct ddi_buf_trans *ddi_translations_hdmi; const struct ddi_buf_trans *ddi_translations; - port = intel_ddi_get_encoder_port(encoder); - hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - - if (IS_BROXTON(dev_priv)) { - if (encoder->type != INTEL_OUTPUT_HDMI) - return; - - /* Vswing programming for HDMI */ - bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port, - INTEL_OUTPUT_HDMI); + if (IS_BROXTON(dev_priv)) return; - } if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ddi_translations_fdi = NULL; @@ -428,13 +436,10 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) skl_get_buf_trans_dp(dev_priv, &n_dp_entries); ddi_translations_edp = skl_get_buf_trans_edp(dev_priv, &n_edp_entries); - ddi_translations_hdmi = - skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); - hdmi_default_entry = 8; + /* If we're boosting the current, set bit 31 of trans1 */ - if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || - dev_priv->vbt.ddi_port_info[port].dp_boost_level) - iboost_bit = 1<<31; + if (dev_priv->vbt.ddi_port_info[port].dp_boost_level) + iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP && port != PORT_A && port != PORT_E && @@ -443,38 +448,20 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) } else if (IS_BROADWELL(dev_priv)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; - - if (dev_priv->vbt.edp.low_vswing) { - ddi_translations_edp = bdw_ddi_translations_edp; - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); - } else { - ddi_translations_edp = bdw_ddi_translations_dp; - n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - } - - ddi_translations_hdmi = bdw_ddi_translations_hdmi; - + ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_default_entry = 7; } else if 
(IS_HASWELL(dev_priv)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp; - ddi_translations_hdmi = hsw_ddi_translations_hdmi; n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); - n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - hdmi_default_entry = 6; } else { WARN(1, "ddi translation table missing\n"); ddi_translations_edp = bdw_ddi_translations_dp; ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; - ddi_translations_hdmi = bdw_ddi_translations_hdmi; n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); - n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_default_entry = 7; } switch (encoder->type) { @@ -483,7 +470,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) size = n_edp_entries; break; case INTEL_OUTPUT_DP: - case INTEL_OUTPUT_HDMI: ddi_translations = ddi_translations_dp; size = n_dp_entries; break; @@ -501,19 +487,48 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) I915_WRITE(DDI_BUF_TRANS_HI(port, i), ddi_translations[i].trans2); } +} + +/* + * Starting with Haswell, DDI port buffers must be programmed with correct + * values in advance. This function programs the correct values for + * HDMI/DVI use cases. + */ +static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 iboost_bit = 0; + int n_hdmi_entries, hdmi_level; + enum port port = intel_ddi_get_encoder_port(encoder); + const struct ddi_buf_trans *ddi_translations_hdmi; - if (encoder->type != INTEL_OUTPUT_HDMI) + if (IS_BROXTON(dev_priv)) return; - /* Choose a good default if VBT is badly populated */ - if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || - hdmi_level >= n_hdmi_entries) - hdmi_level = hdmi_default_entry; + hdmi_level = intel_ddi_hdmi_level(dev_priv, port); + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); + + /* If we're boosting the current, set bit 31 of trans1 */ + if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level) + iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; + } else if (IS_BROADWELL(dev_priv)) { + ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + } else if (IS_HASWELL(dev_priv)) { + ddi_translations_hdmi = hsw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); + } else { + WARN(1, "ddi translation table missing\n"); + ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + } /* Entry 9 is for HDMI: */ - I915_WRITE(DDI_BUF_TRANS_LO(port, i), + I915_WRITE(DDI_BUF_TRANS_LO(port, 9), ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); - I915_WRITE(DDI_BUF_TRANS_HI(port, i), + I915_WRITE(DDI_BUF_TRANS_HI(port, 9), ddi_translations_hdmi[hdmi_level].trans2); } @@ -550,7 +565,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) { WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG); - intel_prepare_ddi_buffer(encoder); + intel_prepare_dp_ddi_buffers(encoder); } /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the @@ -1111,7 +1126,6 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder 
= intel_ddi_get_crtc_encoder(crtc); - struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = intel_crtc->pipe; @@ -1177,29 +1191,15 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) temp |= TRANS_DDI_MODE_SELECT_HDMI; else temp |= TRANS_DDI_MODE_SELECT_DVI; - } else if (type == INTEL_OUTPUT_ANALOG) { temp |= TRANS_DDI_MODE_SELECT_FDI; temp |= (intel_crtc->config->fdi_lanes - 1) << 1; - } else if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (intel_dp->is_mst) { - temp |= TRANS_DDI_MODE_SELECT_DP_MST; - } else - temp |= TRANS_DDI_MODE_SELECT_DP_SST; - + temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count); } else if (type == INTEL_OUTPUT_DP_MST) { - struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp; - - if (intel_dp->is_mst) { - temp |= TRANS_DDI_MODE_SELECT_DP_MST; - } else - temp |= TRANS_DDI_MODE_SELECT_DP_SST; - + temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count); } else { WARN(1, "Invalid encoder type %d for pipe %c\n", @@ -1379,14 +1379,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) TRANS_CLK_SEL_DISABLED); } -static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type) +static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, + enum port port, uint8_t iboost) { + u32 tmp; + + tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); + tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); + if (iboost) + tmp |= iboost << BALANCE_LEG_SHIFT(port); + else + tmp |= BALANCE_LEG_DISABLE(port); + I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); +} + +static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum port port = intel_dig_port->port; + int type = encoder->type; const struct ddi_buf_trans *ddi_translations; uint8_t iboost; uint8_t dp_iboost, hdmi_iboost; int n_entries; - u32 reg; /* VBT may override standard boost values */ dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; @@ -1428,16 +1444,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, return; } - reg = I915_READ(DISPIO_CR_TX_BMU_CR0); - reg &= ~BALANCE_LEG_MASK(port); - reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port)); + _skl_ddi_set_iboost(dev_priv, port, iboost); - if (iboost) - reg |= iboost << BALANCE_LEG_SHIFT(port); - else - reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port); - - I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); + if (port == PORT_A && intel_dig_port->max_lanes == 4) + _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); } static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, @@ -1568,7 +1578,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) level = translate_signal_level(signal_levels); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - skl_ddi_set_iboost(dev_priv, level, port, encoder->type); + skl_ddi_set_iboost(encoder, level); else if (IS_BROXTON(dev_priv)) bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); @@ -1615,8 +1625,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); } - intel_prepare_ddi_buffer(intel_encoder); - if (type == INTEL_OUTPUT_EDP) { struct 
intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_edp_panel_on(intel_dp); @@ -1627,6 +1635,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_prepare_dp_ddi_buffers(intel_encoder); + intel_dp_set_link_params(intel_dp, crtc->config); intel_ddi_init_dp_buf_reg(intel_encoder); @@ -1637,6 +1647,15 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_stop_link_train(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + int level = intel_ddi_hdmi_level(dev_priv, port); + + intel_prepare_hdmi_ddi_buffers(intel_encoder); + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_ddi_set_iboost(intel_encoder, level); + else if (IS_BROXTON(dev_priv)) + bxt_ddi_vswing_sequence(dev_priv, level, port, + INTEL_OUTPUT_HDMI); intel_hdmi->set_infoframes(encoder, crtc->config->has_hdmi_sink, @@ -2105,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) val = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; - if (intel_dp->is_mst) + if (intel_dp->link_mst) val |= DP_TP_CTL_MODE_MST; else { val |= DP_TP_CTL_MODE_SST; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c457eed76f1f..d224f64836c5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -34,6 +34,7 @@ #include <drm/drm_edid.h> #include <drm/drmP.h> #include "intel_drv.h" +#include "intel_frontbuffer.h" #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem_dmabuf.h" @@ -1201,8 +1202,8 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, if (HAS_PCH_SPLIT(dev)) { u32 port_sel; - pp_reg = PCH_PP_CONTROL; - port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; + pp_reg = PP_CONTROL(0); + port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; if (port_sel == PANEL_PORT_SELECT_LVDS && I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) @@ -1210,10 +1211,10 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, /* XXX: else fix for eDP */ } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { /* presumably write lock depends on pipe, not port select */ - pp_reg = VLV_PIPE_PP_CONTROL(pipe); + pp_reg = PP_CONTROL(pipe); panel_pipe = pipe; } else { - pp_reg = PP_CONTROL; + pp_reg = PP_CONTROL(0); if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; } @@ -1958,12 +1959,12 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. 
*/ - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH_DISPLAY(dev_priv)) { if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); - else { + } else { if (crtc->config->has_pch_encoder) { /* if driving the PCH, we need FDI enabled */ assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); @@ -2146,33 +2147,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, } } -static void -intel_fill_fb_info(struct drm_i915_private *dev_priv, - struct drm_framebuffer *fb) -{ - struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info; - unsigned int tile_size, tile_width, tile_height, cpp; - - tile_size = intel_tile_size(dev_priv); - - cpp = drm_format_plane_cpp(fb->pixel_format, 0); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[0], cpp); - - info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp); - info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height); - - if (info->pixel_format == DRM_FORMAT_NV12) { - cpp = drm_format_plane_cpp(fb->pixel_format, 1); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[1], cpp); - - info->uv_offset = fb->offsets[1]; - info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp); - info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height); - } -} - static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2205,16 +2179,15 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv } } -int -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - unsigned int rotation) +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; u32 alignment; - int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -2239,75 +2212,112 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, */ intel_runtime_pm_get(dev_priv); - ret = i915_gem_object_pin_to_display_plane(obj, alignment, - &view); - if (ret) - goto err_pm; - - /* Install a fence for tiled scan-out. Pre-i965 always needs a - * fence, whereas 965+ only requires a fence if using - * framebuffer compression. For simplicity, we always install - * a fence as the cost is not that onerous. - */ - if (view.type == I915_GGTT_VIEW_NORMAL) { - ret = i915_gem_object_get_fence(obj); - if (ret == -EDEADLK) { - /* - * -EDEADLK means there are no free fences - * no pending flips. - * - * This is propagated to atomic, but it uses - * -EDEADLK to force a locking recovery, so - * change the returned error to -EBUSY. - */ - ret = -EBUSY; - goto err_unpin; - } else if (ret) - goto err_unpin; + vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); + if (IS_ERR(vma)) + goto err; - i915_gem_object_pin_fence(obj); + if (i915_vma_is_map_and_fenceable(vma)) { + /* Install a fence for tiled scan-out. Pre-i965 always needs a + * fence, whereas 965+ only requires a fence if using + * framebuffer compression. For simplicity, we always, when + * possible, install a fence as the cost is not that onerous. 
+ * + * If we fail to fence the tiled scanout, then either the + * modeset will reject the change (which is highly unlikely as + * the affected systems, all but one, do not have unmappable + * space) or we will not be able to enable full powersaving + * techniques (also likely not to apply due to various limits + * FBC and the like impose on the size of the buffer, which + * presumably we violated anyway with this unmappable buffer). + * Anyway, it is presumably better to stumble onwards with + * something and try to run the system in a "less than optimal" + * mode that matches the user configuration. + */ + if (i915_vma_get_fence(vma) == 0) + i915_vma_pin_fence(vma); } +err: intel_runtime_pm_put(dev_priv); - return 0; - -err_unpin: - i915_gem_object_unpin_from_display_plane(obj, &view); -err_pm: - intel_runtime_pm_put(dev_priv); - return ret; + return vma; } void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); intel_fill_fb_ggtt_view(&view, fb, rotation); + vma = i915_gem_object_to_ggtt(obj, &view); - if (view.type == I915_GGTT_VIEW_NORMAL) - i915_gem_object_unpin_fence(obj); + i915_vma_unpin_fence(vma); + i915_gem_object_unpin_from_display_plane(vma); +} - i915_gem_object_unpin_from_display_plane(obj, &view); +static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + if (intel_rotation_90_or_270(rotation)) + return to_intel_framebuffer(fb)->rotated[plane].pitch; + else + return fb->pitches[plane]; +} + +/* + * Convert the x/y offsets into a linear offset. + * Only valid with 0/180 degree rotation, which is fine since linear + * offset is only used with linear buffers on pre-hsw and tiled buffers + * with gen2/3, and 90/270 degree rotations isn't supported on any of them. + */ +u32 intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + + return y * pitch + x * cpp; +} + +/* + * Add the x/y offsets derived from fb->offsets[] to the user + * specified plane src x/y offsets. The resulting x/y offsets + * specify the start of scanout from the beginning of the gtt mapping. + */ +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, + int plane) + +{ + const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); + unsigned int rotation = state->base.rotation; + + if (intel_rotation_90_or_270(rotation)) { + *x += intel_fb->rotated[plane].x; + *y += intel_fb->rotated[plane].y; + } else { + *x += intel_fb->normal[plane].x; + *y += intel_fb->normal[plane].y; + } } /* - * Adjust the tile offset by moving the difference into - * the x/y offsets. - * * Input tile dimensions and pitch must already be * rotated to match x and y, and in pixel units. 
*/ -static u32 intel_adjust_tile_offset(int *x, int *y, - unsigned int tile_width, - unsigned int tile_height, - unsigned int tile_size, - unsigned int pitch_tiles, - u32 old_offset, - u32 new_offset) -{ +static u32 _intel_adjust_tile_offset(int *x, int *y, + unsigned int tile_width, + unsigned int tile_height, + unsigned int tile_size, + unsigned int pitch_tiles, + u32 old_offset, + u32 new_offset) +{ + unsigned int pitch_pixels = pitch_tiles * tile_width; unsigned int tiles; WARN_ON(old_offset & (tile_size - 1)); @@ -2319,6 +2329,54 @@ static u32 intel_adjust_tile_offset(int *x, int *y, *y += tiles / pitch_tiles * tile_height; *x += tiles % pitch_tiles * tile_width; + /* minimize x in case it got needlessly big */ + *y += *x / pitch_pixels * tile_height; + *x %= pitch_pixels; + + return new_offset; +} + +/* + * Adjust the tile offset by moving the difference into + * the x/y offsets. + */ +static u32 intel_adjust_tile_offset(int *x, int *y, + const struct intel_plane_state *state, int plane, + u32 old_offset, u32 new_offset) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int rotation = state->base.rotation; + unsigned int pitch = intel_fb_pitch(fb, plane, rotation); + + WARN_ON(new_offset > old_offset); + + if (fb->modifier[plane] != DRM_FORMAT_MOD_NONE) { + unsigned int tile_size, tile_width, tile_height; + unsigned int pitch_tiles; + + tile_size = intel_tile_size(dev_priv); + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[plane], cpp); + + if (intel_rotation_90_or_270(rotation)) { + pitch_tiles = pitch / tile_height; + swap(tile_width, tile_height); + } else { + pitch_tiles = pitch / (tile_width * cpp); + } + + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + old_offset, new_offset); + } else { + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + } + return new_offset; } @@ -2329,18 +2387,24 @@ static u32 intel_adjust_tile_offset(int *x, int *y, * In the 90/270 rotated case, x and y are assumed * to be already rotated to match the rotated GTT view, and * pitch is the tile_height aligned framebuffer height. + * + * This function is used when computing the derived information + * under intel_framebuffer, so using any of that information + * here is not allowed. Anything under drm_framebuffer can be + * used. This is why the user has to pass in the pitch since it + * is specified in the rotated orientation. 
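In the untiled branch of intel_adjust_tile_offset() above, the byte distance between the old and new base offsets is folded back into the x/y coordinates. A worked sketch (not part of the patch) with assumed numbers:

/* Sketch: linear case of the offset adjustment.  Assumed values:
 * pitch = 8192 bytes, cpp = 4, x = 10, y = 3, old_offset = 4096, new_offset = 0.
 */
static void example_adjust_linear(void)
{
	unsigned int pitch = 8192, cpp = 4;
	unsigned int old_offset = 4096, new_offset = 0;
	int x = 10, y = 3;

	old_offset += y * pitch + x * cpp;		   /* absolute byte 28712 */

	y = (old_offset - new_offset) / pitch;		   /* 3 rows              */
	x = ((old_offset - new_offset) - y * pitch) / cpp; /* 1034 pixels         */

	/* (x, y) is now expressed relative to new_offset */
}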
*/ -u32 intel_compute_tile_offset(int *x, int *y, - const struct drm_framebuffer *fb, int plane, - unsigned int pitch, - unsigned int rotation) +static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, + int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int pitch, + unsigned int rotation, + u32 alignment) { - const struct drm_i915_private *dev_priv = to_i915(fb->dev); uint64_t fb_modifier = fb->modifier[plane]; unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); - u32 offset, offset_aligned, alignment; + u32 offset, offset_aligned; - alignment = intel_surf_alignment(dev_priv, fb_modifier); if (alignment) alignment--; @@ -2368,9 +2432,9 @@ u32 intel_compute_tile_offset(int *x, int *y, offset = (tile_rows * pitch_tiles + tiles) * tile_size; offset_aligned = offset & ~alignment; - intel_adjust_tile_offset(x, y, tile_width, tile_height, - tile_size, pitch_tiles, - offset, offset_aligned); + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; offset_aligned = offset & ~alignment; @@ -2382,6 +2446,177 @@ u32 intel_compute_tile_offset(int *x, int *y, return offset_aligned; } +u32 intel_compute_tile_offset(int *x, int *y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = state->base.fb; + unsigned int rotation = state->base.rotation; + int pitch = intel_fb_pitch(fb, plane, rotation); + u32 alignment; + + /* AUX_DIST needs only 4K alignment */ + if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1) + alignment = 4096; + else + alignment = intel_surf_alignment(dev_priv, fb->modifier[plane]); + + return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, + rotation, alignment); +} + +/* Convert the fb->offset[] linear offset into x/y offsets */ +static void intel_fb_offset_to_xy(int *x, int *y, + const struct drm_framebuffer *fb, int plane) +{ + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + u32 linear_offset = fb->offsets[plane]; + + *y = linear_offset / pitch; + *x = linear_offset % pitch / cpp; +} + +static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) +{ + switch (fb_modifier) { + case I915_FORMAT_MOD_X_TILED: + return I915_TILING_X; + case I915_FORMAT_MOD_Y_TILED: + return I915_TILING_Y; + default: + return I915_TILING_NONE; + } +} + +static int +intel_fill_fb_info(struct drm_i915_private *dev_priv, + struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_rotation_info *rot_info = &intel_fb->rot_info; + u32 gtt_offset_rotated = 0; + unsigned int max_size = 0; + uint32_t format = fb->pixel_format; + int i, num_planes = drm_format_num_planes(format); + unsigned int tile_size = intel_tile_size(dev_priv); + + for (i = 0; i < num_planes; i++) { + unsigned int width, height; + unsigned int cpp, size; + u32 offset; + int x, y; + + cpp = drm_format_plane_cpp(format, i); + width = drm_format_plane_width(fb->width, format, i); + height = drm_format_plane_height(fb->height, format, i); + + intel_fb_offset_to_xy(&x, &y, fb, i); + + /* + * The fence (if used) is aligned to the start of the object + * so having the framebuffer wrap around across the edge of the + * fenced region doesn't really work. 
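The alignment handling in _intel_compute_tile_offset() relies on the alignment being a power of two: after the alignment-- it is a low-bit mask, and offset & ~alignment rounds the offset down to the previous aligned boundary (4096 for the NV12 AUX plane above). A tiny standalone illustration, not part of the patch:

/* Sketch: power-of-two align-down, the same mask trick used above. */
static u32 example_align_down(u32 offset, u32 alignment)
{
	alignment--;			/* 4096 -> 0x0fff                   */
	return offset & ~alignment;	/* e.g. 0x1234 & ~0x0fff = 0x1000   */
}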
We have no API to configure + * the fence start offset within the object (nor could we probably + * on gen2/3). So it's just easier if we just require that the + * fb layout agrees with the fence layout. We already check that the + * fb stride matches the fence stride elsewhere. + */ + if (i915_gem_object_is_tiled(intel_fb->obj) && + (x + width) * cpp > fb->pitches[i]) { + DRM_DEBUG("bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); + return -EINVAL; + } + + /* + * First pixel of the framebuffer from + * the start of the normal gtt mapping. + */ + intel_fb->normal[i].x = x; + intel_fb->normal[i].y = y; + + offset = _intel_compute_tile_offset(dev_priv, &x, &y, + fb, 0, fb->pitches[i], + DRM_ROTATE_0, tile_size); + offset /= tile_size; + + if (fb->modifier[i] != DRM_FORMAT_MOD_NONE) { + unsigned int tile_width, tile_height; + unsigned int pitch_tiles; + struct drm_rect r; + + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[i], cpp); + + rot_info->plane[i].offset = offset; + rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); + rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); + rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); + + intel_fb->rotated[i].pitch = + rot_info->plane[i].height * tile_height; + + /* how many tiles does this plane need */ + size = rot_info->plane[i].stride * rot_info->plane[i].height; + /* + * If the plane isn't horizontally tile aligned, + * we need one more tile. + */ + if (x != 0) + size++; + + /* rotate the x/y offsets to match the GTT view */ + r.x1 = x; + r.y1 = y; + r.x2 = x + width; + r.y2 = y + height; + drm_rect_rotate(&r, + rot_info->plane[i].width * tile_width, + rot_info->plane[i].height * tile_height, + DRM_ROTATE_270); + x = r.x1; + y = r.y1; + + /* rotate the tile dimensions to match the GTT view */ + pitch_tiles = intel_fb->rotated[i].pitch / tile_height; + swap(tile_width, tile_height); + + /* + * We only keep the x/y offsets, so push all of the + * gtt offset into the x/y offsets. + */ + _intel_adjust_tile_offset(&x, &y, tile_size, + tile_width, tile_height, pitch_tiles, + gtt_offset_rotated * tile_size, 0); + + gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; + + /* + * First pixel of the framebuffer from + * the start of the rotated gtt mapping. 
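To make the tile bookkeeping in intel_fill_fb_info() concrete, here is the same arithmetic as an illustrative sketch (not part of the patch) with assumed numbers: a 1920x1080 XRGB8888 plane (cpp = 4, pitch = 7680), X-tiled with a 4096-byte tile assumed to be laid out as 512-byte rows (128 pixels wide, 8 rows high), and x = y = 0.

/* Sketch: tiles needed by one plane (tile geometry is an assumption). */
static unsigned int example_plane_tiles(void)
{
	unsigned int tile_width = 128, tile_height = 8, cpp = 4;
	unsigned int pitch = 7680, height = 1080;
	unsigned int x = 0, y = 0;
	unsigned int stride_tiles = DIV_ROUND_UP(pitch, tile_width * cpp); /* 15  */
	unsigned int height_tiles = DIV_ROUND_UP(y + height, tile_height); /* 135 */
	unsigned int size = stride_tiles * height_tiles;		   /* 2025 */

	if (x != 0)	/* a partially used first tile column costs one more */
		size++;

	return size;	/* 2025 tiles * 4096 bytes = 8294400 bytes of GTT */
}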
+ */ + intel_fb->rotated[i].x = x; + intel_fb->rotated[i].y = y; + } else { + size = DIV_ROUND_UP((y + height) * fb->pitches[i] + + x * cpp, tile_size); + } + + /* how many tiles in total needed in the bo */ + max_size = max(max_size, offset + size); + } + + if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) { + DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n", + max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size); + return -EINVAL; + } + + return 0; +} + static int i9xx_format_to_fourcc(int format) { switch (format) { @@ -2465,9 +2700,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; } - obj->tiling_mode = plane_config->tiling; - if (obj->tiling_mode == I915_TILING_X) - obj->stride = fb->pitches[0]; + if (plane_config->tiling == I915_TILING_X) + obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; mode_cmd.pixel_format = fb->pixel_format; mode_cmd.width = fb->width; @@ -2488,7 +2722,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return true; out_unref_obj: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); return false; } @@ -2552,7 +2786,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, continue; obj = intel_fb_obj(fb); - if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { drm_framebuffer_reference(fb); goto valid_fb; } @@ -2565,7 +2799,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. */ - to_intel_plane_state(plane_state)->visible = false; + to_intel_plane_state(plane_state)->base.visible = false; crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); intel_pre_disable_primary_noatomic(&intel_crtc->base); intel_plane->disable_plane(primary, &intel_crtc->base); @@ -2583,24 +2817,188 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; - intel_state->src.x1 = plane_state->src_x; - intel_state->src.y1 = plane_state->src_y; - intel_state->src.x2 = plane_state->src_x + plane_state->src_w; - intel_state->src.y2 = plane_state->src_y + plane_state->src_h; - intel_state->dst.x1 = plane_state->crtc_x; - intel_state->dst.y1 = plane_state->crtc_y; - intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w; - intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h; + intel_state->base.src.x1 = plane_state->src_x; + intel_state->base.src.y1 = plane_state->src_y; + intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w; + intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h; + intel_state->base.dst.x1 = plane_state->crtc_x; + intel_state->base.dst.y1 = plane_state->crtc_y; + intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w; + intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h; obj = intel_fb_obj(fb); - if (obj->tiling_mode != I915_TILING_NONE) + if (i915_gem_object_is_tiled(obj)) dev_priv->preserve_bios_swizzle = true; drm_framebuffer_reference(fb); primary->fb = primary->state->fb = fb; primary->crtc = primary->state->crtc = &intel_crtc->base; intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary)); - obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; + atomic_or(to_intel_plane(primary)->frontbuffer_bit, + &obj->frontbuffer_bits); +} + +static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, + 
unsigned int rotation) +{ + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + switch (fb->modifier[plane]) { + case DRM_FORMAT_MOD_NONE: + case I915_FORMAT_MOD_X_TILED: + switch (cpp) { + case 8: + return 4096; + case 4: + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + switch (cpp) { + case 8: + return 2048; + case 4: + return 4096; + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + default: + MISSING_CASE(fb->modifier[plane]); + } + + return 2048; +} + +static int skl_check_main_surface(struct intel_plane_state *plane_state) +{ + const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; + int w = drm_rect_width(&plane_state->base.src) >> 16; + int h = drm_rect_height(&plane_state->base.src) >> 16; + int max_width = skl_max_plane_width(fb, 0, rotation); + int max_height = 4096; + u32 alignment, offset, aux_offset = plane_state->aux.offset; + + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + intel_add_fb_offsets(&x, &y, plane_state, 0); + offset = intel_compute_tile_offset(&x, &y, plane_state, 0); + + alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); + + /* + * AUX surface offset is specified as the distance from the + * main surface offset, and it must be non-negative. Make + * sure that is what we will get. + */ + if (offset > aux_offset) + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, aux_offset & ~(alignment - 1)); + + /* + * When using an X-tiled surface, the plane blows up + * if the x offset + width exceed the stride. 
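Reading the limit table in skl_max_plane_width() above: a Y- or Yf-tiled 32bpp plane (cpp = 4) may be at most 4096 pixels wide, while linear or X-tiled allows 8192. A small usage sketch, not part of the patch and with a made-up wrapper name:

/* Sketch: checking a source width against the per-tiling/per-cpp limit. */
static bool example_width_ok(const struct drm_framebuffer *fb, int src_w,
			     unsigned int rotation)
{
	return src_w <= skl_max_plane_width(fb, 0, rotation);
}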
+ * + * TODO: linear and Y-tiled seem fine, Yf untested, + */ + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + + while ((x + w) * cpp > fb->pitches[0]) { + if (offset == 0) { + DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); + return -EINVAL; + } + + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, offset - alignment); + } + } + + plane_state->main.offset = offset; + plane_state->main.x = x; + plane_state->main.y = y; + + return 0; +} + +static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int max_width = skl_max_plane_width(fb, 1, rotation); + int max_height = 4096; + int x = plane_state->base.src.x1 >> 17; + int y = plane_state->base.src.y1 >> 17; + int w = drm_rect_width(&plane_state->base.src) >> 17; + int h = drm_rect_height(&plane_state->base.src) >> 17; + u32 offset; + + intel_add_fb_offsets(&x, &y, plane_state, 1); + offset = intel_compute_tile_offset(&x, &y, plane_state, 1); + + /* FIXME not quite sure how/if these apply to the chroma plane */ + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + plane_state->aux.offset = offset; + plane_state->aux.x = x; + plane_state->aux.y = y; + + return 0; +} + +int skl_check_plane_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int ret; + + /* Rotate src coordinates to match rotated GTT view */ + if (intel_rotation_90_or_270(rotation)) + drm_rect_rotate(&plane_state->base.src, + fb->width, fb->height, DRM_ROTATE_270); + + /* + * Handle the AUX surface first since + * the main surface setup depends on it. 
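The >> 17 in skl_check_nv12_aux_surface() is the usual >> 16 fixed-point conversion combined with the /2 of NV12 4:2:0 chroma subsampling, so the CbCr plane works on half-resolution coordinates. An illustrative sketch, not part of the patch:

/* Sketch: chroma x from a 16.16 fixed-point luma coordinate. */
static int example_nv12_chroma_x(int src_x1)	/* 16.16 fixed point */
{
	/* e.g. src_x1 = 128 << 16:  luma x = 128, chroma x = 64 */
	return src_x1 >> 17;
}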
+ */ + if (fb->pixel_format == DRM_FORMAT_NV12) { + ret = skl_check_nv12_aux_surface(plane_state); + if (ret) + return ret; + } else { + plane_state->aux.offset = ~0xfff; + plane_state->aux.x = 0; + plane_state->aux.y = 0; + } + + ret = skl_check_main_surface(plane_state); + if (ret) + return ret; + + return 0; } static void i9xx_update_primary_plane(struct drm_plane *primary, @@ -2617,9 +3015,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - int x = plane_state->src.x1 >> 16; - int y = plane_state->src.y1 >> 16; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -2670,37 +3067,31 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, BUG(); } - if (INTEL_INFO(dev)->gen >= 4 && - obj->tiling_mode != I915_TILING_NONE) + if (INTEL_GEN(dev_priv) >= 4 && + fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; if (IS_G4X(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; + intel_add_fb_offsets(&x, &y, plane_state, 0); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_INFO(dev)->gen >= 4) intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; - } else { - intel_crtc->dspaddr_offset = linear_offset; - } + intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == BIT(DRM_ROTATE_180)) { + if (rotation == DRM_ROTATE_180) { dspcntr |= DISPPLANE_ROTATE_180; x += (crtc_state->pipe_src_w - 1); y += (crtc_state->pipe_src_h - 1); - - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (INTEL_INFO(dev)->gen < 4) + intel_crtc->dspaddr_offset = linear_offset; + intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2709,11 +3100,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else - I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); + I915_WRITE(DSPADDR(plane), i915_gem_object_ggtt_offset(obj, NULL) + linear_offset); POSTING_READ(reg); } @@ -2741,15 +3133,13 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - int x = plane_state->src.x1 >> 16; - int y = plane_state->src.y1 >> 16; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; dspcntr |= DISPLAY_PLANE_ENABLE; @@ -2780,32 +3170,28 @@ static 
void ironlake_update_primary_plane(struct drm_plane *primary, BUG(); } - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; + intel_add_fb_offsets(&x, &y, plane_state, 0); + intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; - if (rotation == BIT(DRM_ROTATE_180)) { + intel_compute_tile_offset(&x, &y, plane_state, 0); + + if (rotation == DRM_ROTATE_180) { dspcntr |= DISPPLANE_ROTATE_180; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += (crtc_state->pipe_src_w - 1); y += (crtc_state->pipe_src_h - 1); - - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; } } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2813,7 +3199,8 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { @@ -2835,32 +3222,21 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } } -u32 intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane) +u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, + unsigned int rotation) { + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; struct i915_vma *vma; - u64 offset; - intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, - intel_plane->base.state->rotation); + intel_fill_fb_ggtt_view(&view, fb, rotation); - vma = i915_gem_obj_to_ggtt_view(obj, &view); + vma = i915_gem_object_to_ggtt(obj, &view); if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", - view.type)) + view.type)) return -1; - offset = vma->node.start; - - if (plane == 1) { - offset += vma->ggtt_view.params.rotated.uv_start_page * - PAGE_SIZE; - } - - WARN_ON(upper_32_bits(offset)); - - return lower_32_bits(offset); + return i915_ggtt_offset(vma); } static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) @@ -2890,6 +3266,28 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc) } } +u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + const struct drm_i915_private *dev_priv = to_i915(fb->dev); + u32 stride = intel_fb_pitch(fb, plane, rotation); + + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. 
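As the comment in skl_plane_stride() says, the stride register is not in bytes: linear buffers program it in 64-byte chunks, tiled buffers in whole tiles. With an assumed pitch of 7680 bytes that is 7680 / 64 = 120 for linear, or 7680 / 512 = 15 for X-tiled (assuming 512-byte tile rows). A minimal sketch, not part of the patch:

/* Sketch: stride register units; unit_bytes is 64 (linear) or the tile
 * row size in bytes (tiled).  The numbers above are examples.
 */
static u32 example_plane_stride(u32 pitch_bytes, u32 unit_bytes)
{
	return pitch_bytes / unit_bytes;
}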
+ */ + if (intel_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp); + } else { + stride /= intel_fb_stride_alignment(dev_priv, fb->modifier[0], + fb->pixel_format); + } + + return stride; +} + u32 skl_plane_ctl_format(uint32_t pixel_format) { switch (pixel_format) { @@ -2952,17 +3350,17 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier) u32 skl_plane_ctl_rotation(unsigned int rotation) { switch (rotation) { - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: break; /* * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr * while i915 HW rotation is clockwise, thats why this swapping. */ - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: return PLANE_CTL_ROTATE_270; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: return PLANE_CTL_ROTATE_180; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: return PLANE_CTL_ROTATE_90; default: MISSING_CASE(rotation); @@ -2979,22 +3377,20 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_crtc->pipe; - u32 plane_ctl, stride_div, stride; - u32 tile_height, plane_offset, plane_size; + u32 plane_ctl; unsigned int rotation = plane_state->base.rotation; - int x_offset, y_offset; - u32 surf_addr; + u32 stride = skl_plane_stride(fb, 0, rotation); + u32 surf_addr = plane_state->main.offset; int scaler_id = plane_state->scaler_id; - int src_x = plane_state->src.x1 >> 16; - int src_y = plane_state->src.y1 >> 16; - int src_w = drm_rect_width(&plane_state->src) >> 16; - int src_h = drm_rect_height(&plane_state->src) >> 16; - int dst_x = plane_state->dst.x1; - int dst_y = plane_state->dst.y1; - int dst_w = drm_rect_width(&plane_state->dst); - int dst_h = drm_rect_height(&plane_state->dst); + int src_x = plane_state->main.x; + int src_y = plane_state->main.y; + int src_w = drm_rect_width(&plane_state->base.src) >> 16; + int src_h = drm_rect_height(&plane_state->base.src) >> 16; + int dst_x = plane_state->base.dst.x1; + int dst_y = plane_state->base.dst.y1; + int dst_w = drm_rect_width(&plane_state->base.dst); + int dst_h = drm_rect_height(&plane_state->base.dst); plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -3005,36 +3401,19 @@ static void skylake_update_primary_plane(struct drm_plane *plane, plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); - - WARN_ON(drm_rect_width(&plane_state->src) == 0); + /* Sizes are 0 based */ + src_w--; + src_h--; + dst_w--; + dst_h--; - if (intel_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); - stride = DIV_ROUND_UP(fb->height, tile_height); - x_offset = stride * tile_height - src_y - src_h; - y_offset = src_x; - plane_size = (src_w - 1) << 16 | (src_h - 1); - } else { - stride = fb->pitches[0] / stride_div; - x_offset = src_x; - y_offset = src_y; - plane_size = (src_h - 1) << 16 | (src_w - 1); - } - plane_offset = y_offset << 16 | x_offset; - - intel_crtc->adjusted_x = x_offset; - intel_crtc->adjusted_y = y_offset; + 
intel_crtc->adjusted_x = src_x; + intel_crtc->adjusted_y = src_y; I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); - I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); + I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); I915_WRITE(PLANE_STRIDE(pipe, 0), stride); + I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w); if (scaler_id >= 0) { uint32_t ps_ctrl = 0; @@ -3051,7 +3430,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane, I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); } - I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); + I915_WRITE(PLANE_SURF(pipe, 0), + intel_fb_gtt_offset(fb, rotation) + surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -3093,40 +3473,113 @@ static void intel_update_primary_planes(struct drm_device *dev) for_each_crtc(dev, crtc) { struct intel_plane *plane = to_intel_plane(crtc->primary); - struct intel_plane_state *plane_state; - - drm_modeset_lock_crtc(crtc, &plane->base); - plane_state = to_intel_plane_state(plane->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); - if (plane_state->visible) + if (plane_state->base.visible) plane->update_plane(&plane->base, to_intel_crtc_state(crtc->state), plane_state); + } +} + +static int +__intel_display_resume(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i, ret; - drm_modeset_unlock_crtc(crtc); + intel_modeset_setup_hw_state(dev); + i915_redisable_vga(dev); + + if (!state) + return 0; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * Force recalculation even if we restore + * current state. With fast modeset this may not result + * in a modeset when the state is compatible. + */ + crtc_state->mode_changed = true; } + + /* ignore any reset values/BIOS leftovers in the WM registers */ + to_intel_atomic_state(state)->skip_intermediate_wm = true; + + ret = drm_atomic_commit(state); + + WARN_ON(ret == -EDEADLK); + return ret; +} + +static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) +{ + return intel_has_gpu_reset(dev_priv) && + INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); } void intel_prepare_reset(struct drm_i915_private *dev_priv) { - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state; + int ret; - /* reset doesn't touch the display */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + /* + * Need mode_config.mutex so that we don't + * trample ongoing ->detect() and whatnot. + */ + mutex_lock(&dev->mode_config.mutex); + drm_modeset_acquire_init(ctx, 0); + while (1) { + ret = drm_modeset_lock_all_ctx(dev, ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(ctx); + } + + /* reset doesn't touch the display, but flips might get nuked anyway, */ + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) return; - drm_modeset_lock_all(&dev_priv->drm); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. 
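For reference, the PLANE_OFFSET/PLANE_SIZE writes in skylake_update_primary_plane() above pack two 16-bit fields, and the sizes are programmed 0-based; a 1920x1080 source therefore ends up as (1079 << 16) | 1919. An illustrative sketch, not part of the patch:

/* Sketch: the size register packing used above (1920x1080 -> 0x0437077f). */
static u32 example_plane_size(u32 src_w, u32 src_h)
{
	src_w--;			/* sizes are 0 based */
	src_h--;
	return (src_h << 16) | src_w;
}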
*/ - intel_display_suspend(&dev_priv->drm); + state = drm_atomic_helper_duplicate_state(dev, ctx); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + state = NULL; + DRM_ERROR("Duplicating state failed with %i\n", ret); + goto err; + } + + ret = drm_atomic_helper_disable_all(dev, ctx); + if (ret) { + DRM_ERROR("Suspending crtc's failed with %i\n", ret); + goto err; + } + + dev_priv->modeset_restore_state = state; + state->acquire_ctx = ctx; + return; + +err: + drm_atomic_state_free(state); } void intel_finish_reset(struct drm_i915_private *dev_priv) { + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; + struct drm_atomic_state *state = dev_priv->modeset_restore_state; + int ret; + /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space @@ -3134,44 +3587,51 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) */ intel_complete_page_flips(dev_priv); - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; + dev_priv->modeset_restore_state = NULL; /* reset doesn't touch the display */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { + if (!gpu_reset_clobbers_display(dev_priv)) { + if (!state) { + /* + * Flips in the rings have been nuked by the reset, + * so update the base address of all primary + * planes to the the last fb to make sure we're + * showing the correct fb after a reset. + * + * FIXME: Atomic will make this obsolete since we won't schedule + * CS-based flips (which might get lost in gpu resets) any more. + */ + intel_update_primary_planes(dev); + } else { + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); + } + } else { /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. + * The display has been reset as well, + * so need a full re-initialization. */ - intel_update_primary_planes(&dev_priv->drm); - return; - } - - /* - * The display has been reset as well, - * so need a full re-initialization. 
- */ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); + intel_runtime_pm_disable_interrupts(dev_priv); + intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(&dev_priv->drm); + intel_modeset_init_hw(dev); - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(&dev_priv->drm); + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); - intel_hpd_init(dev_priv); + intel_hpd_init(dev_priv); + } - drm_modeset_unlock_all(&dev_priv->drm); + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); + mutex_unlock(&dev->mode_config.mutex); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -4248,7 +4708,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), + &state->scaler_state.scaler_id, DRM_ROTATE_0, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } @@ -4273,7 +4733,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct drm_framebuffer *fb = plane_state->base.fb; int ret; - bool force_detach = !fb || !plane_state->visible; + bool force_detach = !fb || !plane_state->base.visible; DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", intel_plane->base.base.id, intel_plane->base.name, @@ -4283,10 +4743,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, plane_state->base.rotation, - drm_rect_width(&plane_state->src) >> 16, - drm_rect_height(&plane_state->src) >> 16, - drm_rect_width(&plane_state->dst), - drm_rect_height(&plane_state->dst)); + drm_rect_width(&plane_state->base.src) >> 16, + drm_rect_height(&plane_state->base.src) >> 16, + drm_rect_width(&plane_state->base.dst), + drm_rect_height(&plane_state->base.dst)); if (ret || plane_state->scaler_id < 0) return ret; @@ -4564,12 +5024,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct drm_atomic_state *old_state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); - struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_pri_state = drm_atomic_get_existing_plane_state(old_state, primary); - intel_frontbuffer_flip(dev, pipe_config->fb_bits); + intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); crtc->wm.cxsr_allowed = true; @@ -4584,9 +5043,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) intel_fbc_post_update(crtc); - if (primary_state->visible && + if (primary_state->base.visible && (needs_modeset(&pipe_config->base) || - !old_primary_state->visible)) + !old_primary_state->base.visible)) intel_post_enable_primary(&crtc->base); } } @@ -4612,8 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) intel_fbc_pre_update(crtc, pipe_config, primary_state); - if (old_primary_state->visible && - (modeset || !primary_state->visible)) + if 
(old_primary_state->base.visible && + (modeset || !primary_state->base.visible)) intel_pre_disable_primary(&crtc->base); } @@ -4692,7 +5151,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask * to compute the mask of flip planes precisely. For the time being * consider this a flip to a NULL plane. */ - intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); + intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -5691,15 +6150,7 @@ static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) { - unsigned int i; - - for (i = 0; i < 15; i++) { - if (skl_cdclk_pcu_ready(dev_priv)) - return true; - udelay(10); - } - - return false; + return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; } static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) @@ -6298,13 +6749,13 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) if (!intel_crtc->active) return; - if (to_intel_plane_state(crtc->primary->state)->visible) { + if (to_intel_plane_state(crtc->primary->state)->base.visible) { WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); - to_intel_plane_state(crtc->primary->state)->visible = false; + to_intel_plane_state(crtc->primary->state)->base.visible = false; } dev_priv->display.crtc_disable(crtc); @@ -9420,7 +9871,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); - I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); + I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n"); I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); if (IS_HASWELL(dev)) @@ -10178,7 +10629,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (plane_state && plane_state->visible) { + if (plane_state && plane_state->base.visible) { unsigned int width = plane_state->base.crtc_w; unsigned int height = plane_state->base.crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; @@ -10242,7 +10693,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, int pipe = intel_crtc->pipe; uint32_t cntl = 0; - if (plane_state && plane_state->visible) { + if (plane_state && plane_state->base.visible) { cntl = MCURSOR_GAMMA_ENABLE; switch (plane_state->base.crtc_w) { case 64: @@ -10263,7 +10714,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, if (HAS_DDI(dev)) cntl |= CURSOR_PIPE_CSC_ENABLE; - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) + if (plane_state->base.rotation == DRM_ROTATE_180) cntl |= CURSOR_ROTATE_180; } @@ -10309,7 +10760,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, /* ILK+ do this automagically */ if (HAS_GMCH_DISPLAY(dev) && - plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + plane_state->base.rotation == DRM_ROTATE_180) { base += (plane_state->base.crtc_h * plane_state->base.crtc_w - 1) * 4; } @@ -10442,7 +10893,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, fb = 
intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); return fb; } @@ -10953,13 +11404,13 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); - drm_gem_object_unreference(&work->pending_flip_obj->base); - - if (work->flip_queued_req) - i915_gem_request_assign(&work->flip_queued_req, NULL); + i915_gem_object_put(work->pending_flip_obj); mutex_unlock(&dev->struct_mutex); - intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); + i915_gem_request_put(work->flip_queued_req); + + intel_frontbuffer_flip_complete(to_i915(dev), + to_intel_plane(primary)->frontbuffer_bit); intel_fbc_post_update(crtc); drm_framebuffer_unreference(work->old_fb); @@ -11124,7 +11575,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11140,13 +11591,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, 0); /* aux display base address, unused */ + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, 0); /* aux display base address, unused */ return 0; } @@ -11158,7 +11609,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11171,13 +11622,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, MI_NOOP); return 0; } @@ -11189,7 +11640,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11203,11 +11654,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, * Display Registers (which do not change across a 
page-flip) * so we need only reprogram the base address. */ - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | - obj->tiling_mode); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | + intel_fb_modifier_to_tiling(fb->modifier[0])); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. @@ -11215,7 +11666,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11227,7 +11678,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11237,10 +11688,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, if (ret) return ret; - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier[0])); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11250,7 +11702,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11262,7 +11714,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t plane_bit = 0; int len, ret; @@ -11283,7 +11735,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, } len = 4; - if (engine->id == RCS) { + if (req->engine->id == RCS) { len += 6; /* * On Gen 8, SRM is now taking an extra dword to accommodate @@ -11321,30 +11773,32 @@ static int intel_gen7_queue_flip(struct drm_device *dev, * for the RCS also doesn't appear to drop events. Setting the DERRMR * to zero does lead to lockups within MI_DISPLAY_FLIP. 
*/ - if (engine->id == RCS) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + if (req->engine->id == RCS) { + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEC_PRI_FLIP_DONE)); if (IS_GEN8(dev)) - intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 | + intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT); else - intel_ring_emit(engine, MI_STORE_REGISTER_MEM | + intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, engine->scratch.gtt_offset + 256); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, + i915_ggtt_offset(req->engine->scratch) + 256); if (IS_GEN8(dev)) { - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); } } - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); - intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, (MI_NOOP)); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier[0])); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, (MI_NOOP)); return 0; } @@ -11379,7 +11833,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (resv && !reservation_object_test_signaled_rcu(resv, false)) return true; - return engine != i915_gem_request_get_engine(obj->last_write_req); + return engine != i915_gem_active_get_engine(&obj->last_write, + &obj->base.dev->struct_mutex); } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, @@ -11390,7 +11845,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb = intel_crtc->base.primary->fb; const enum pipe pipe = intel_crtc->pipe; - u32 ctl, stride, tile_height; + u32 ctl, stride = skl_plane_stride(fb, 0, rotation); ctl = I915_READ(PLANE_CTL(pipe, 0)); ctl &= ~PLANE_CTL_TILED_MASK; @@ -11411,20 +11866,6 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. - */ - if (intel_rotation_90_or_270(rotation)) { - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); - stride = DIV_ROUND_UP(fb->height, tile_height); - } else { - stride = fb->pitches[0] / - intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - } - - /* * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on * PLANE_SURF updates, the update is then guaranteed to be atomic. 
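Because PLANE_CTL and PLANE_STRIDE only take effect when PLANE_SURF is written, the MMIO flip keeps the surface address write last. A condensed sketch of that ordering (not part of the patch), using the same register helpers as skylake_update_primary_plane(); ctl, stride and gtt_offset are assumed to be precomputed:

/* Sketch: write ordering that makes the MMIO flip atomic. */
static void example_mmio_flip(struct drm_i915_private *dev_priv, enum pipe pipe,
			      u32 ctl, u32 stride, u32 gtt_offset)
{
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	I915_WRITE(PLANE_SURF(pipe, 0), gtt_offset);	/* latches ctl/stride */
	POSTING_READ(PLANE_SURF(pipe, 0));
}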
*/ @@ -11440,15 +11881,13 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_framebuffer *intel_fb = - to_intel_framebuffer(intel_crtc->base.primary->fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_framebuffer *fb = intel_crtc->base.primary->fb; i915_reg_t reg = DSPCNTR(intel_crtc->plane); u32 dspcntr; dspcntr = I915_READ(reg); - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -11471,9 +11910,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w) struct reservation_object *resv; if (work->flip_queued_req) - WARN_ON(__i915_wait_request(work->flip_queued_req, - false, NULL, - &dev_priv->rps.mmioflips)); + WARN_ON(i915_wait_request(work->flip_queued_req, + false, NULL, + NO_WAITBOOST)); /* For framebuffer backed by dmabuf, wait for fence */ resv = i915_gem_object_get_dmabuf_resv(obj); @@ -11584,7 +12023,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; - struct drm_i915_gem_request *request = NULL; + struct drm_i915_gem_request *request; + struct i915_vma *vma; int ret; /* @@ -11650,7 +12090,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Reference the objects for the scheduled work. */ drm_framebuffer_reference(work->old_fb); - drm_gem_object_reference(&obj->base); crtc->primary->fb = fb; update_state_fb(crtc->primary); @@ -11658,7 +12097,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_fbc_pre_update(intel_crtc, intel_crtc->config, to_intel_plane_state(primary->state)); - work->pending_flip_obj = obj; + work->pending_flip_obj = i915_gem_object_get(obj); ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -11677,13 +12116,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { engine = &dev_priv->engine[BCS]; - if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) + if (fb->modifier[0] != old_fb->modifier[0]) /* vlv: DISPLAY_FLIP fails to change tiling */ engine = NULL; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { engine = &dev_priv->engine[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { - engine = i915_gem_request_get_engine(obj->last_write_req); + engine = i915_gem_active_get_engine(&obj->last_write, + &obj->base.dev->struct_mutex); if (engine == NULL || engine->id != RCS) engine = &dev_priv->engine[BCS]; } else { @@ -11692,47 +12132,41 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mmio_flip = use_mmio_flip(engine, obj); - /* When using CS flips, we want to emit semaphores between rings. - * However, when using mmio flips we will create a task to do the - * synchronisation, so all we want here is to pin the framebuffer - * into the display plane and skip any waits. 
- */ - if (!mmio_flip) { - ret = i915_gem_object_sync(obj, engine, &request); - if (!ret && !request) { - request = i915_gem_request_alloc(engine, NULL); - ret = PTR_ERR_OR_ZERO(request); - } - - if (ret) - goto cleanup_pending; - } - - ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); - if (ret) + vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto cleanup_pending; + } - work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), - obj, 0); + work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); work->gtt_offset += intel_crtc->dspaddr_offset; work->rotation = crtc->primary->state->rotation; if (mmio_flip) { INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); - i915_gem_request_assign(&work->flip_queued_req, - obj->last_write_req); - + work->flip_queued_req = i915_gem_active_get(&obj->last_write, + &obj->base.dev->struct_mutex); schedule_work(&work->mmio_work); } else { - i915_gem_request_assign(&work->flip_queued_req, request); + request = i915_gem_request_alloc(engine, engine->last_context); + if (IS_ERR(request)) { + ret = PTR_ERR(request); + goto cleanup_unpin; + } + + ret = i915_gem_object_sync(obj, request); + if (ret) + goto cleanup_request; + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) - goto cleanup_unpin; + goto cleanup_request; intel_mark_page_flip_active(intel_crtc, work); + work->flip_queued_req = i915_gem_request_get(request); i915_add_request_no_flush(request); } @@ -11740,25 +12174,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); - intel_frontbuffer_flip_prepare(dev, + intel_frontbuffer_flip_prepare(to_i915(dev), to_intel_plane(primary)->frontbuffer_bit); trace_i915_flip_request(intel_crtc->plane, obj); return 0; +cleanup_request: + i915_add_request_no_flush(request); cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state->rotation); cleanup_pending: - if (!IS_ERR_OR_NULL(request)) - i915_add_request_no_flush(request); atomic_dec(&intel_crtc->unpin_work_count); mutex_unlock(&dev->struct_mutex); cleanup: crtc->primary->fb = old_fb; update_state_fb(crtc->primary); - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); @@ -11826,7 +12260,7 @@ static bool intel_wm_need_update(struct drm_plane *plane, struct intel_plane_state *cur = to_intel_plane_state(plane->state); /* Update watermarks on tiling or size changes. 
*/ - if (new->visible != cur->visible) + if (new->base.visible != cur->base.visible) return true; if (!cur->base.fb || !new->base.fb) @@ -11834,10 +12268,10 @@ static bool intel_wm_need_update(struct drm_plane *plane, if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || cur->base.rotation != new->base.rotation || - drm_rect_width(&new->src) != drm_rect_width(&cur->src) || - drm_rect_height(&new->src) != drm_rect_height(&cur->src) || - drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || - drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) + drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || + drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || + drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || + drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) return true; return false; @@ -11845,10 +12279,10 @@ static bool intel_wm_need_update(struct drm_plane *plane, static bool needs_scaling(struct intel_plane_state *state) { - int src_w = drm_rect_width(&state->src) >> 16; - int src_h = drm_rect_height(&state->src) >> 16; - int dst_w = drm_rect_width(&state->dst); - int dst_h = drm_rect_height(&state->dst); + int src_w = drm_rect_width(&state->base.src) >> 16; + int src_h = drm_rect_height(&state->base.src) >> 16; + int dst_w = drm_rect_width(&state->base.dst); + int dst_h = drm_rect_height(&state->base.dst); return (src_w != dst_w || src_h != dst_h); } @@ -11879,8 +12313,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, return ret; } - was_visible = old_plane_state->visible; - visible = to_intel_plane_state(plane_state)->visible; + was_visible = old_plane_state->base.visible; + visible = to_intel_plane_state(plane_state)->base.visible; if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; @@ -11896,7 +12330,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) - to_intel_plane_state(plane_state)->visible = visible = false; + to_intel_plane_state(plane_state)->base.visible = visible = false; if (!was_visible && !visible) return 0; @@ -12114,21 +12548,11 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to default limit on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0) { - int type = connector->base.connector_type; - int clamp_bpp = 24; - - /* Fall back to 18 bpp when DP sink capability is unknown. 
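needs_scaling() compares a 16.16 fixed-point source rectangle against an integer destination rectangle; the >> 16 recovers whole pixels. A self-contained sketch (not part of the patch) with assumed sizes:

/* Sketch: fixed-point source vs. integer destination (example values). */
static bool example_needs_scaling(void)
{
	int src_w = (1920 << 16) >> 16;	/* 1920 pixels */
	int src_h = (1080 << 16) >> 16;	/* 1080 pixels */
	int dst_w = 1280, dst_h = 720;

	return src_w != dst_w || src_h != dst_h;	/* true: scaling needed */
}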
*/ - if (type == DRM_MODE_CONNECTOR_DisplayPort || - type == DRM_MODE_CONNECTOR_eDP) - clamp_bpp = 18; - - if (bpp > clamp_bpp) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", - bpp, clamp_bpp); - pipe_config->pipe_bpp = clamp_bpp; - } + /* Clamp bpp to 8 on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0 && bpp > 24) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", + bpp); + pipe_config->pipe_bpp = 24; } } @@ -12282,6 +12706,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("planes on this crtc\n"); list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + char *format_name; intel_plane = to_intel_plane(plane); if (intel_plane->pipe != crtc->pipe) continue; @@ -12294,19 +12719,23 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, continue; } + format_name = drm_get_format_name(fb->pixel_format); + DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", plane->base.id, plane->name); DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", - fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->pixel_format)); + fb->base.id, fb->width, fb->height, format_name); DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), - drm_rect_height(&state->dst)); + state->base.src.x1 >> 16, + state->base.src.y1 >> 16, + drm_rect_width(&state->base.src) >> 16, + drm_rect_height(&state->base.src) >> 16, + state->base.dst.x1, state->base.dst.y1, + drm_rect_width(&state->base.dst), + drm_rect_height(&state->base.dst)); + + kfree(format_name); } } @@ -12315,6 +12744,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_connector *connector; unsigned int used_ports = 0; + unsigned int used_mst_ports = 0; /* * Walk the connector list instead of the encoder @@ -12351,11 +12781,20 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) return false; used_ports |= port_mask; + break; + case INTEL_OUTPUT_DP_MST: + used_mst_ports |= + 1 << enc_to_mst(&encoder->base)->primary->port; + break; default: break; } } + /* can't mix MST and SST/HDMI on the same port */ + if (used_ports & used_mst_ports) + return false; + return true; } @@ -13523,8 +13962,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, if (!intel_plane_state->wait_req) continue; - ret = __i915_wait_request(intel_plane_state->wait_req, - true, NULL, NULL); + ret = i915_wait_request(intel_plane_state->wait_req, + true, NULL, NULL); if (ret) { /* Any hang should be swallowed by the wait */ WARN_ON(ret == -EIO); @@ -13636,8 +14075,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (!intel_plane_state->wait_req) continue; - ret = __i915_wait_request(intel_plane_state->wait_req, - true, NULL, NULL); + ret = i915_wait_request(intel_plane_state->wait_req, + true, NULL, NULL); /* EIO should be eaten, and we can't get interrupted in the * worker, and blocking commits have waited already. 
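The dump code above follows drm_get_format_name() now returning a freshly allocated string, so the caller owns the buffer and must kfree() it. A minimal usage sketch, not part of the patch and with a made-up function name:

/* Sketch: ownership of the string returned by drm_get_format_name(). */
static void example_dump_format(const struct drm_framebuffer *fb)
{
	char *format_name = drm_get_format_name(fb->pixel_format);

	DRM_DEBUG_KMS("format = %s\n", format_name);
	kfree(format_name);	/* caller frees the allocation */
}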
*/ WARN_ON(ret); @@ -13814,19 +14253,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state) { struct drm_plane_state *old_plane_state; struct drm_plane *plane; - struct drm_i915_gem_object *obj, *old_obj; - struct intel_plane *intel_plane; int i; - mutex_lock(&state->dev->struct_mutex); - for_each_plane_in_state(state, plane, old_plane_state, i) { - obj = intel_fb_obj(plane->state->fb); - old_obj = intel_fb_obj(old_plane_state->fb); - intel_plane = to_intel_plane(plane); - - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); - } - mutex_unlock(&state->dev->struct_mutex); + for_each_plane_in_state(state, plane, old_plane_state, i) + i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), + intel_fb_obj(plane->state->fb), + to_intel_plane(plane)->frontbuffer_bit); } /** @@ -13992,7 +14424,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { */ int intel_prepare_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { struct drm_device *dev = plane->dev; struct drm_framebuffer *fb = new_state->fb; @@ -14051,15 +14483,17 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation); + struct i915_vma *vma; + + vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); + if (IS_ERR(vma)) + ret = PTR_ERR(vma); } if (ret == 0) { - struct intel_plane_state *plane_state = - to_intel_plane_state(new_state); - - i915_gem_request_assign(&plane_state->wait_req, - obj->last_write_req); + to_intel_plane_state(new_state)->wait_req = + i915_gem_active_get(&obj->last_write, + &obj->base.dev->struct_mutex); } return ret; @@ -14076,10 +14510,11 @@ intel_prepare_plane_fb(struct drm_plane *plane, */ void intel_cleanup_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct intel_plane_state *old_intel_state; + struct intel_plane_state *intel_state = to_intel_plane_state(plane->state); struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); @@ -14092,6 +14527,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, !INTEL_INFO(dev)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state->rotation); + i915_gem_request_assign(&intel_state->wait_req, NULL); i915_gem_request_assign(&old_intel_state->wait_req, NULL); } @@ -14126,13 +14562,14 @@ intel_check_primary_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_crtc *crtc = state->base.crtc; - struct drm_framebuffer *fb = state->base.fb; int min_scale = DRM_PLANE_HELPER_NO_SCALING; int max_scale = DRM_PLANE_HELPER_NO_SCALING; bool can_position = false; + int ret; - if (INTEL_INFO(plane->dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { /* use scaler when colorkey is not required */ if (state->ckey.flags == I915_SET_COLORKEY_NONE) { min_scale = 1; @@ -14141,12 +14578,23 @@ intel_check_primary_plane(struct drm_plane *plane, can_position = true; } - return drm_plane_helper_check_update(plane, crtc, fb, &state->src, - &state->dst, &state->clip, - state->base.rotation, - min_scale, max_scale, - can_position, true, - &state->visible); + ret = drm_plane_helper_check_state(&state->base, + &state->clip, + min_scale, max_scale, + can_position, true); + if 
(ret) + return ret; + + if (!state->base.fb) + return 0; + + if (INTEL_GEN(dev_priv) >= 9) { + ret = skl_check_plane_surface(state); + if (ret) + return ret; + } + + return 0; } static void intel_begin_crtc_commit(struct drm_crtc *crtc, @@ -14306,11 +14754,11 @@ fail: void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) { if (!dev->mode_config.rotation_property) { - unsigned long flags = BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_180); + unsigned long flags = DRM_ROTATE_0 | + DRM_ROTATE_180; if (INTEL_INFO(dev)->gen >= 9) - flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270); + flags |= DRM_ROTATE_90 | DRM_ROTATE_270; dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, flags); @@ -14326,19 +14774,17 @@ intel_check_cursor_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { - struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_framebuffer *fb = state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = to_intel_plane(plane)->pipe; unsigned stride; int ret; - ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, - &state->dst, &state->clip, - state->base.rotation, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true, &state->visible); + ret = drm_plane_helper_check_state(&state->base, + &state->clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); if (ret) return ret; @@ -14375,7 +14821,7 @@ intel_check_cursor_plane(struct drm_plane *plane, * Refuse the put the cursor into that compromised position. */ if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && - state->visible && state->base.crtc_x < 0) { + state->base.visible && state->base.crtc_x < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -14407,7 +14853,7 @@ intel_update_cursor_plane(struct drm_plane *plane, if (!obj) addr = 0; else if (!INTEL_INFO(dev)->cursor_needs_physical) - addr = i915_gem_obj_ggtt_offset(obj); + addr = i915_gem_object_ggtt_offset(obj, NULL); else addr = obj->phys_handle->busaddr; @@ -14453,8 +14899,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, if (!dev->mode_config.rotation_property) dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, - BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_180)); + DRM_ROTATE_0 | + DRM_ROTATE_180); if (dev->mode_config.rotation_property) drm_object_attach_property(&cursor->base.base, dev->mode_config.rotation_property, @@ -14660,12 +15106,50 @@ static bool intel_crt_present(struct drm_device *dev) return true; } +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) +{ + int pps_num; + int pps_idx; + + if (HAS_DDI(dev_priv)) + return; + /* + * This w/a is needed at least on CPT/PPT, but to be sure apply it + * everywhere where registers can be write protected. 
+ */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_num = 2; + else + pps_num = 1; + + for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { + u32 val = I915_READ(PP_CONTROL(pps_idx)); + + val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; + I915_WRITE(PP_CONTROL(pps_idx), val); + } +} + +static void intel_pps_init(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv)) + dev_priv->pps_mmio_base = PCH_PPS_BASE; + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->pps_mmio_base = VLV_PPS_BASE; + else + dev_priv->pps_mmio_base = PPS_BASE; + + intel_pps_unlock_regs_wa(dev_priv); +} + static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; + intel_pps_init(dev_priv); + /* * intel_edp_init_connector() depends on this completing first, to * prevent the registeration of both eDP and LVDS and the incorrect @@ -14853,7 +15337,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) drm_framebuffer_cleanup(fb); mutex_lock(&dev->struct_mutex); WARN_ON(!intel_fb->obj->framebuffer_references--); - drm_gem_object_unreference(&intel_fb->obj->base); + i915_gem_object_put(intel_fb->obj); mutex_unlock(&dev->struct_mutex); kfree(intel_fb); } @@ -14933,24 +15417,27 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(dev); - unsigned int aligned_height; + unsigned int tiling = i915_gem_object_get_tiling(obj); int ret; u32 pitch_limit, stride_alignment; + char *format_name; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { - /* Enforce that fb modifier and tiling mode match, but only for - * X-tiled. This is needed for FBC. */ - if (!!(obj->tiling_mode == I915_TILING_X) != - !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { + /* + * If there's a fence, enforce that + * the fb modifier and tiling mode match. + */ + if (tiling != I915_TILING_NONE && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); return -EINVAL; } } else { - if (obj->tiling_mode == I915_TILING_X) + if (tiling == I915_TILING_X) { mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; - else if (obj->tiling_mode == I915_TILING_Y) { + } else if (tiling == I915_TILING_Y) { DRM_DEBUG("No Y tiling for legacy addfb\n"); return -EINVAL; } @@ -14974,6 +15461,16 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } + /* + * gen2/3 display engine uses the fence if present, + * so the tiling mode must match the fb modifier exactly. + */ + if (INTEL_INFO(dev_priv)->gen < 4 && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { + DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n"); + return -EINVAL; + } + stride_alignment = intel_fb_stride_alignment(dev_priv, mode_cmd->modifier[0], mode_cmd->pixel_format); @@ -14993,10 +15490,15 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && - mode_cmd->pitches[0] != obj->stride) { + /* + * If there's a fence, enforce that + * the fb pitch and fence stride match. 
+ */ + if (tiling != I915_TILING_NONE && + mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) { DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", - mode_cmd->pitches[0], obj->stride); + mode_cmd->pitches[0], + i915_gem_object_get_stride(obj)); return -EINVAL; } @@ -15009,16 +15511,18 @@ static int intel_framebuffer_init(struct drm_device *dev, break; case DRM_FORMAT_XRGB1555: if (INTEL_INFO(dev)->gen > 3) { - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } break; case DRM_FORMAT_ABGR8888: if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && INTEL_INFO(dev)->gen < 9) { - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } break; @@ -15026,15 +15530,17 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: if (INTEL_INFO(dev)->gen < 4) { - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } break; case DRM_FORMAT_ABGR2101010: if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } break; @@ -15043,14 +15549,16 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: if (INTEL_INFO(dev)->gen < 5) { - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } break; default: - DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + format_name = drm_get_format_name(mode_cmd->pixel_format); + DRM_DEBUG("unsupported pixel format: %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -15058,17 +15566,12 @@ static int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; - aligned_height = intel_fb_align_height(dev, mode_cmd->height, - mode_cmd->pixel_format, - mode_cmd->modifier[0]); - /* FIXME drm helper for size checks (especially planar formats)? 
*/ - if (obj->base.size < aligned_height * mode_cmd->pitches[0]) - return -EINVAL; - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; - intel_fill_fb_info(dev_priv, &intel_fb->base); + ret = intel_fill_fb_info(dev_priv, &intel_fb->base); + if (ret) + return ret; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -15090,13 +15593,13 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; - obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0])); - if (&obj->base == NULL) + obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); + if (!obj) return ERR_PTR(-ENOENT); fb = intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put_unlocked(obj); return fb; } @@ -15504,7 +16007,6 @@ void intel_modeset_init_hw(struct drm_device *dev) dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev_priv); } /* @@ -15782,6 +16284,13 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder) return false; } +static bool has_pch_trancoder(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder) +{ + return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || + (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); +} + static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -15825,7 +16334,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * Temporarily change the plane mapping and disable everything * ... */ plane = crtc->plane; - to_intel_plane_state(crtc->base.primary->state)->visible = true; + to_intel_plane_state(crtc->base.primary->state)->base.visible = true; crtc->plane = !plane; intel_crtc_disable_noatomic(&crtc->base); crtc->plane = plane; @@ -15860,7 +16369,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * worst a fifo underrun happens which also sets this to false. */ crtc->cpu_fifo_underrun_disabled = true; - crtc->pch_fifo_underrun_disabled = true; + /* + * We track the PCH trancoder underrun reporting state + * within the crtc. With crtc for pipe A housing the underrun + * reporting state for PCH transcoder A, crtc for pipe B housing + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, + * and marking underrun reporting as disabled for the non-existing + * PCH transcoders B and C would prevent enabling the south + * error interrupt (see cpt_can_enable_serr_int()). 
+ */ + if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe)) + crtc->pch_fifo_underrun_disabled = true; } } @@ -15952,10 +16471,10 @@ static void readout_plane_state(struct intel_crtc *crtc) struct intel_plane_state *plane_state = to_intel_plane_state(primary->state); - plane_state->visible = crtc->active && + plane_state->base.visible = crtc->active && primary_get_hw_state(to_intel_plane(primary)); - if (plane_state->visible) + if (plane_state->base.visible) crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); } @@ -16174,9 +16693,10 @@ void intel_display_resume(struct drm_device *dev) struct drm_atomic_state *state = dev_priv->modeset_restore_state; struct drm_modeset_acquire_ctx ctx; int ret; - bool setup = false; dev_priv->modeset_restore_state = NULL; + if (state) + state->acquire_ctx = &ctx; /* * This is a cludge because with real atomic modeset mode_config.mutex @@ -16187,43 +16707,17 @@ void intel_display_resume(struct drm_device *dev) mutex_lock(&dev->mode_config.mutex); drm_modeset_acquire_init(&ctx, 0); -retry: - ret = drm_modeset_lock_all_ctx(dev, &ctx); - - if (ret == 0 && !setup) { - setup = true; - - intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); - } - - if (ret == 0 && state) { - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - state->acquire_ctx = &ctx; - - /* ignore any reset values/BIOS leftovers in the WM registers */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { - /* - * Force recalculation even if we restore - * current state. With fast modeset this may not result - * in a modeset when the state is compatible. - */ - crtc_state->mode_changed = true; - } - - ret = drm_atomic_commit(state); - } + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret != -EDEADLK) + break; - if (ret == -EDEADLK) { drm_modeset_backoff(&ctx); - goto retry; } + if (!ret) + ret = __intel_display_resume(dev, state); + drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); @@ -16239,7 +16733,6 @@ void intel_modeset_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; - int ret; intel_init_gt_powersave(dev_priv); @@ -16253,15 +16746,17 @@ void intel_modeset_gem_init(struct drm_device *dev) * for this. 
*/ for_each_crtc(dev, c) { + struct i915_vma *vma; + obj = intel_fb_obj(c->primary->fb); if (obj == NULL) continue; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(c->primary->fb, + vma = intel_pin_and_fence_fb_obj(c->primary->fb, c->primary->state->rotation); mutex_unlock(&dev->struct_mutex); - if (ret) { + if (IS_ERR(vma)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 21b04c3eda41..364db908c191 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -256,6 +256,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct intel_dp *intel_dp); +static void +intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); static void pps_lock(struct intel_dp *intel_dp) { @@ -463,13 +465,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON; + return I915_READ(PP_STATUS(pipe)) & PP_ON; } static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD; + return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD; } static bool vlv_pipe_any(struct drm_i915_private *dev_priv, @@ -486,7 +488,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, enum pipe pipe; for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { - u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & + u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) & PANEL_PORT_SELECT_MASK; if (port_sel != PANEL_PORT_SELECT_VLV(port)) @@ -583,30 +585,21 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv, struct intel_dp *intel_dp, struct pps_registers *regs) { + int pps_idx = 0; + memset(regs, 0, sizeof(*regs)); - if (IS_BROXTON(dev_priv)) { - int idx = bxt_power_sequencer_idx(intel_dp); - - regs->pp_ctrl = BXT_PP_CONTROL(idx); - regs->pp_stat = BXT_PP_STATUS(idx); - regs->pp_on = BXT_PP_ON_DELAYS(idx); - regs->pp_off = BXT_PP_OFF_DELAYS(idx); - } else if (HAS_PCH_SPLIT(dev_priv)) { - regs->pp_ctrl = PCH_PP_CONTROL; - regs->pp_stat = PCH_PP_STATUS; - regs->pp_on = PCH_PP_ON_DELAYS; - regs->pp_off = PCH_PP_OFF_DELAYS; - regs->pp_div = PCH_PP_DIVISOR; - } else { - enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + if (IS_BROXTON(dev_priv)) + pps_idx = bxt_power_sequencer_idx(intel_dp); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_idx = vlv_power_sequencer_pipe(intel_dp); - regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe); - regs->pp_stat = VLV_PIPE_PP_STATUS(pipe); - regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe); - regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe); - regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe); - } + regs->pp_ctrl = PP_CONTROL(pps_idx); + regs->pp_stat = PP_STATUS(pps_idx); + regs->pp_on = PP_ON_DELAYS(pps_idx); + regs->pp_off = PP_OFF_DELAYS(pps_idx); + if (!IS_BROXTON(dev_priv)) + regs->pp_div = PP_DIVISOR(pps_idx); } static i915_reg_t @@ -651,8 +644,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, i915_reg_t pp_ctrl_reg, pp_div_reg; u32 pp_div; - pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); - pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); + pp_ctrl_reg = PP_CONTROL(pipe); + pp_div_reg = PP_DIVISOR(pipe); pp_div = I915_READ(pp_div_reg); pp_div &= 
PP_REFERENCE_DIVIDER_MASK; @@ -1041,10 +1034,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (WARN_ON(txsize > 20)) return -E2BIG; + WARN_ON(!msg->buffer != !msg->size); + if (msg->buffer) memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); - else - WARN_ON(msg->size); ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); if (ret > 0) { @@ -1447,7 +1440,7 @@ intel_dp_max_link_rate(struct intel_dp *intel_dp) if (WARN_ON(len <= 0)) return 162000; - return rates[rate_to_index(0, rates) - 1]; + return rates[len - 1]; } int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) @@ -1651,6 +1644,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, { intel_dp->link_rate = pipe_config->port_clock; intel_dp->lane_count = pipe_config->lane_count; + intel_dp->link_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST); } static void intel_dp_prepare(struct intel_encoder *encoder) @@ -1835,7 +1829,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); control = I915_READ(_pp_ctrl_reg(intel_dp)); - if (!IS_BROXTON(dev)) { + if (WARN_ON(!HAS_DDI(dev_priv) && + (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; } @@ -1956,7 +1951,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if ((pp & POWER_TARGET_ON) == 0) + if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); power_domain = intel_display_port_aux_power_domain(intel_encoder); @@ -2043,7 +2038,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) POSTING_READ(pp_ctrl_reg); } - pp |= POWER_TARGET_ON; + pp |= PANEL_POWER_ON; if (!IS_GEN5(dev)) pp |= PANEL_POWER_RESET; @@ -2095,7 +2090,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some * panels get very unhappy and cease to work. 
*/ - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | + pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | EDP_BLC_ENABLE); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); @@ -2728,7 +2723,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum pipe pipe = intel_dp->pps_pipe; - i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); + i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); edp_panel_vdd_off_sync(intel_dp); @@ -3395,84 +3390,67 @@ intel_dp_link_down(struct intel_dp *intel_dp) } static bool -intel_dp_get_dpcd(struct intel_dp *intel_dp) +intel_dp_read_dpcd(struct intel_dp *intel_dp) { - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) return false; /* aux transfer failed */ DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); - if (intel_dp->dpcd[DP_DPCD_REV] == 0) - return false; /* DPCD not present */ + return intel_dp->dpcd[DP_DPCD_REV] != 0; +} - if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT, - &intel_dp->sink_count, 1) < 0) - return false; +static bool +intel_edp_init_dpcd(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - /* - * Sink count can change between short pulse hpd hence - * a member variable in intel_dp will track any changes - * between short pulse interrupts. - */ - intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count); + /* this function is meant to be called only once */ + WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0); - /* - * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that - * a dongle is present but no display. Unless we require to know - * if a dongle is present or not, we don't need to update - * downstream port information. So, an early return here saves - * time from performing other operations which are not required. - */ - if (!is_edp(intel_dp) && !intel_dp->sink_count) + if (!intel_dp_read_dpcd(intel_dp)) return false; - /* Check if the panel supports PSR */ - memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); - if (is_edp(intel_dp)) { - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, - intel_dp->psr_dpcd, - sizeof(intel_dp->psr_dpcd)); - if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { - dev_priv->psr.sink_support = true; - DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); - } - - if (INTEL_INFO(dev)->gen >= 9 && - (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) { - uint8_t frame_sync_cap; - - dev_priv->psr.sink_support = true; - drm_dp_dpcd_read(&intel_dp->aux, - DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP, - &frame_sync_cap, 1); - dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false; - /* PSR2 needs frame sync as well */ - dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; - DRM_DEBUG_KMS("PSR2 %s on sink", - dev_priv->psr.psr2_support ? 
"supported" : "not supported"); - } - - /* Read the eDP Display control capabilities registers */ - memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd)); - if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && - (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, - intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == - sizeof(intel_dp->edp_dpcd))) - DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), - intel_dp->edp_dpcd); - } + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) + dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; - DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", - yesno(intel_dp_source_supports_hbr2(intel_dp)), - yesno(drm_dp_tps3_supported(intel_dp->dpcd))); + /* Check if the panel supports PSR */ + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, + intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { + dev_priv->psr.sink_support = true; + DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); + } + + if (INTEL_GEN(dev_priv) >= 9 && + (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) { + uint8_t frame_sync_cap; + + dev_priv->psr.sink_support = true; + drm_dp_dpcd_read(&intel_dp->aux, + DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP, + &frame_sync_cap, 1); + dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false; + /* PSR2 needs frame sync as well */ + dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync; + DRM_DEBUG_KMS("PSR2 %s on sink", + dev_priv->psr.psr2_support ? "supported" : "not supported"); + } + + /* Read the eDP Display control capabilities registers */ + if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) == + sizeof(intel_dp->edp_dpcd))) + DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); /* Intermediate frequency support */ - if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */ + if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */ __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; int i; @@ -3491,7 +3469,36 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) intel_dp->num_sink_rates = i; } - intel_dp_print_rates(intel_dp); + return true; +} + + +static bool +intel_dp_get_dpcd(struct intel_dp *intel_dp) +{ + if (!intel_dp_read_dpcd(intel_dp)) + return false; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT, + &intel_dp->sink_count, 1) < 0) + return false; + + /* + * Sink count can change between short pulse hpd hence + * a member variable in intel_dp will track any changes + * between short pulse interrupts. + */ + intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count); + + /* + * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that + * a dongle is present but no display. Unless we require to know + * if a dongle is present or not, we don't need to update + * downstream port information. So, an early return here saves + * time from performing other operations which are not required. 
+ */ + if (!is_edp(intel_dp) && !intel_dp->sink_count) + return false; if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) @@ -3526,7 +3533,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp) } static bool -intel_dp_probe_mst(struct intel_dp *intel_dp) +intel_dp_can_mst(struct intel_dp *intel_dp) { u8 buf[1]; @@ -3539,18 +3546,30 @@ intel_dp_probe_mst(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) return false; - if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { - if (buf[0] & DP_MST_CAP) { - DRM_DEBUG_KMS("Sink is MST capable\n"); - intel_dp->is_mst = true; - } else { - DRM_DEBUG_KMS("Sink is not MST capable\n"); - intel_dp->is_mst = false; - } - } + if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1) + return false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); - return intel_dp->is_mst; + return buf[0] & DP_MST_CAP; +} + +static void +intel_dp_configure_mst(struct intel_dp *intel_dp) +{ + if (!i915.enable_dp_mst) + return; + + if (!intel_dp->can_mst) + return; + + intel_dp->is_mst = intel_dp_can_mst(intel_dp); + + if (intel_dp->is_mst) + DRM_DEBUG_KMS("Sink is MST capable\n"); + else + DRM_DEBUG_KMS("Sink is not MST capable\n"); + + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); } static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) @@ -3909,7 +3928,7 @@ static bool intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - u8 sink_irq_vector; + u8 sink_irq_vector = 0; u8 old_sink_count = intel_dp->sink_count; bool ret; @@ -3936,7 +3955,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) /* Try to read the source of the interrupt */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { + intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && + sink_irq_vector != 0) { /* Clear interrupt source */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, @@ -3980,6 +4000,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) connector_status_connected : connector_status_disconnected; } + if (intel_dp_can_mst(intel_dp)) + return connector_status_connected; + /* If no HPD, poke DDC gently */ if (drm_probe_ddc(&intel_dp->aux.ddc)) return connector_status_connected; @@ -4217,8 +4240,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) struct drm_device *dev = connector->dev; enum drm_connector_status status; enum intel_display_power_domain power_domain; - bool ret; - u8 sink_irq_vector; + u8 sink_irq_vector = 0; power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(to_i915(dev), power_domain); @@ -4252,10 +4274,17 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) if (intel_encoder->type != INTEL_OUTPUT_EDP) intel_encoder->type = INTEL_OUTPUT_DP; + DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", + yesno(intel_dp_source_supports_hbr2(intel_dp)), + yesno(drm_dp_tps3_supported(intel_dp->dpcd))); + + intel_dp_print_rates(intel_dp); + intel_dp_probe_oui(intel_dp); - ret = intel_dp_probe_mst(intel_dp); - if (ret) { + intel_dp_configure_mst(intel_dp); + + if (intel_dp->is_mst) { /* * If we are in MST mode then this connector * won't appear connected or have anything @@ -4290,7 +4319,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) /* Try to read the source of the interrupt */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && - intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { + 
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && + sink_irq_vector != 0) { /* Clear interrupt source */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, @@ -4630,13 +4660,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) pps_lock(intel_dp); - /* - * Read out the current power sequencer assignment, - * in case the BIOS did something with it. - */ - if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev)) - vlv_initial_power_sequencer_setup(intel_dp); - + /* Reinit the power sequencer, in case BIOS did something with it. */ + intel_dp_pps_init(encoder->dev, intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); @@ -4984,6 +5009,17 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, I915_READ(regs.pp_div)); } +static void intel_dp_pps_init(struct drm_device *dev, + struct intel_dp *intel_dp) +{ + if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + vlv_initial_power_sequencer_setup(intel_dp); + } else { + intel_dp_init_panel_power_sequencer(dev, intel_dp); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); + } +} + /** * intel_dp_set_drrs_state - program registers for RR switch to take effect * @dev: DRM device @@ -5186,7 +5222,7 @@ unlock: /** * intel_edp_drrs_invalidate - Disable Idleness DRRS - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called everytime rendering on the given planes start. @@ -5194,10 +5230,9 @@ unlock: * * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. */ -void intel_edp_drrs_invalidate(struct drm_device *dev, - unsigned frontbuffer_bits) +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5229,7 +5264,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, /** * intel_edp_drrs_flush - Restart Idleness DRRS - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called every time rendering on the given planes has @@ -5239,10 +5274,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, * * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. */ -void intel_edp_drrs_flush(struct drm_device *dev, - unsigned frontbuffer_bits) +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -5400,27 +5434,15 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, pps_lock(intel_dp); intel_dp_init_panel_power_timestamps(intel_dp); - - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - vlv_initial_power_sequencer_setup(intel_dp); - } else { - intel_dp_init_panel_power_sequencer(dev, intel_dp); - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); - } - + intel_dp_pps_init(dev, intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); pps_unlock(intel_dp); /* Cache DPCD and EDID for edp. 
*/ - has_dpcd = intel_dp_get_dpcd(intel_dp); + has_dpcd = intel_edp_init_dpcd(intel_dp); - if (has_dpcd) { - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) - dev_priv->no_aux_handshake = - intel_dp->dpcd[DP_MAX_DOWNSPREAD] & - DP_NO_AUX_HANDSHAKE_LINK_TRAINING; - } else { + if (!has_dpcd) { /* if this fails, presume the device is a ghost */ DRM_INFO("failed to retrieve link info, disabling eDP\n"); goto out_vdd_off; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 68a005d729e9..629337dbca3d 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -170,10 +170,10 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) intel_mst->connector = found; if (intel_dp->active_mst_links == 0) { - intel_prepare_ddi_buffer(&intel_dig_port->base); - intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config); + intel_prepare_dp_ddi_buffers(&intel_dig_port->base); + intel_dp_set_link_params(intel_dp, intel_crtc->config); intel_ddi_init_dp_buf_reg(&intel_dig_port->base); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cc937a19b1ba..774aab342f40 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,11 +178,22 @@ struct intel_framebuffer { struct drm_framebuffer base; struct drm_i915_gem_object *obj; struct intel_rotation_info rot_info; + + /* for each plane in the normal GTT view */ + struct { + unsigned int x, y; + } normal[2]; + /* for each plane in the rotated GTT view */ + struct { + unsigned int x, y; + unsigned int pitch; /* pixels */ + } rotated[2]; }; struct intel_fbdev { struct drm_fb_helper helper; struct intel_framebuffer *fb; + struct i915_vma *vma; async_cookie_t cookie; int preferred_bpp; }; @@ -338,10 +349,16 @@ struct intel_atomic_state { struct intel_plane_state { struct drm_plane_state base; - struct drm_rect src; - struct drm_rect dst; struct drm_rect clip; - bool visible; + + struct { + u32 offset; + int x, y; + } main; + struct { + u32 offset; + int x, y; + } aux; /* * scaler_id @@ -852,6 +869,7 @@ struct intel_dp { int link_rate; uint8_t lane_count; uint8_t sink_count; + bool link_mst; bool has_audio; bool detect_done; enum hdmi_force_audio force_audio; @@ -1107,7 +1125,7 @@ void intel_crt_reset(struct drm_encoder *encoder); /* intel_ddi.c */ void intel_ddi_clk_select(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); -void intel_prepare_ddi_buffer(struct intel_encoder *encoder); +void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder); void hsw_fdi_link_train(struct drm_crtc *crtc); void intel_ddi_init(struct drm_device *dev, enum port port); enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); @@ -1134,21 +1152,10 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); uint32_t ddi_signal_levels(struct intel_dp *intel_dp); -/* intel_frontbuffer.c */ -void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, - enum fb_op_origin origin); -void intel_frontbuffer_flip_prepare(struct drm_device *dev, - unsigned frontbuffer_bits); -void intel_frontbuffer_flip_complete(struct drm_device *dev, - unsigned frontbuffer_bits); -void intel_frontbuffer_flip(struct drm_device *dev, - unsigned frontbuffer_bits); unsigned int intel_fb_align_height(struct drm_device *dev, unsigned int height, uint32_t pixel_format, uint64_t fb_format_modifier); -void intel_fb_obj_flush(struct drm_i915_gem_object 
*obj, bool retire, - enum fb_op_origin origin); u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, uint64_t fb_modifier, uint32_t pixel_format); @@ -1166,12 +1173,18 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; void intel_init_display_hooks(struct drm_i915_private *dev_priv); +unsigned int intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane); +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, int plane); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); void intel_mark_busy(struct drm_i915_private *dev_priv); void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); int intel_display_suspend(struct drm_device *dev); +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); int intel_connector_init(struct intel_connector *); struct intel_connector *intel_connector_alloc(void); @@ -1227,8 +1240,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); -int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - unsigned int rotation); +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, @@ -1238,9 +1251,9 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe); void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe); void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe); int intel_prepare_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state); + struct drm_plane_state *new_state); void intel_cleanup_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state); + struct drm_plane_state *old_state); int intel_plane_atomic_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, @@ -1258,7 +1271,7 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, static inline bool intel_rotation_90_or_270(unsigned int rotation) { - return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)); + return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); } void intel_create_rotation_property(struct drm_device *dev, @@ -1290,9 +1303,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) u32 intel_compute_tile_offset(int *x, int *y, - const struct drm_framebuffer *fb, int plane, - unsigned int pitch, - unsigned int rotation); + const struct intel_plane_state *state, int plane); void intel_prepare_reset(struct drm_i915_private *dev_priv); void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); @@ -1335,13 +1346,14 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct 
intel_crtc_state *crtc_state); -u32 intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane); +u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_tiling(uint64_t fb_modifier); u32 skl_plane_ctl_rotation(unsigned int rotation); +u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, + unsigned int rotation); +int skl_check_plane_surface(struct intel_plane_state *plane_state); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); @@ -1384,11 +1396,12 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp); void intel_edp_drrs_disable(struct intel_dp *intel_dp); -void intel_edp_drrs_invalidate(struct drm_device *dev, - unsigned frontbuffer_bits); -void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); bool intel_digital_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *port); + struct intel_digital_port *port); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, @@ -1561,13 +1574,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con /* intel_psr.c */ void intel_psr_enable(struct intel_dp *intel_dp); void intel_psr_disable(struct intel_dp *intel_dp); -void intel_psr_invalidate(struct drm_device *dev, +void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits); -void intel_psr_flush(struct drm_device *dev, +void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); void intel_psr_init(struct drm_device *dev); -void intel_psr_single_frame_update(struct drm_device *dev, +void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits); /* intel_runtime_pm.c */ @@ -1667,13 +1680,6 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) atomic_dec(&dev_priv->pm.wakeref_count); } -/* TODO: convert users of these to rely instead on proper RPM refcounting */ -#define DISABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ - disable_rpm_wakeref_asserts(dev_priv) - -#define ENABLE_RPM_WAKEREF_ASSERTS(dev_priv) \ - enable_rpm_wakeref_asserts(dev_priv) - void intel_runtime_pm_get(struct drm_i915_private *dev_priv); bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); @@ -1699,11 +1705,11 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); void intel_init_gt_powersave(struct drm_i915_private *dev_priv); void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); +void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); -void intel_reset_gt_powersave(struct drm_i915_private *dev_priv); -void gen6_update_ring_freq(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private 
*dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c new file mode 100644 index 000000000000..2e96a86105c2 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -0,0 +1,323 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "intel_ringbuffer.h" +#include "intel_lrc.h" + +static const struct engine_info { + const char *name; + unsigned exec_id; + enum intel_engine_hw_id hw_id; + u32 mmio_base; + unsigned irq_shift; + int (*init_legacy)(struct intel_engine_cs *engine); + int (*init_execlists)(struct intel_engine_cs *engine); +} intel_engines[] = { + [RCS] = { + .name = "render ring", + .exec_id = I915_EXEC_RENDER, + .hw_id = RCS_HW, + .mmio_base = RENDER_RING_BASE, + .irq_shift = GEN8_RCS_IRQ_SHIFT, + .init_execlists = logical_render_ring_init, + .init_legacy = intel_init_render_ring_buffer, + }, + [BCS] = { + .name = "blitter ring", + .exec_id = I915_EXEC_BLT, + .hw_id = BCS_HW, + .mmio_base = BLT_RING_BASE, + .irq_shift = GEN8_BCS_IRQ_SHIFT, + .init_execlists = logical_xcs_ring_init, + .init_legacy = intel_init_blt_ring_buffer, + }, + [VCS] = { + .name = "bsd ring", + .exec_id = I915_EXEC_BSD, + .hw_id = VCS_HW, + .mmio_base = GEN6_BSD_RING_BASE, + .irq_shift = GEN8_VCS1_IRQ_SHIFT, + .init_execlists = logical_xcs_ring_init, + .init_legacy = intel_init_bsd_ring_buffer, + }, + [VCS2] = { + .name = "bsd2 ring", + .exec_id = I915_EXEC_BSD, + .hw_id = VCS2_HW, + .mmio_base = GEN8_BSD2_RING_BASE, + .irq_shift = GEN8_VCS2_IRQ_SHIFT, + .init_execlists = logical_xcs_ring_init, + .init_legacy = intel_init_bsd2_ring_buffer, + }, + [VECS] = { + .name = "video enhancement ring", + .exec_id = I915_EXEC_VEBOX, + .hw_id = VECS_HW, + .mmio_base = VEBOX_RING_BASE, + .irq_shift = GEN8_VECS_IRQ_SHIFT, + .init_execlists = logical_xcs_ring_init, + .init_legacy = intel_init_vebox_ring_buffer, + }, +}; + +static struct intel_engine_cs * +intel_engine_setup(struct drm_i915_private *dev_priv, + enum intel_engine_id id) +{ + const struct engine_info *info = &intel_engines[id]; + struct intel_engine_cs *engine = &dev_priv->engine[id]; + + engine->id = id; + engine->i915 = dev_priv; + engine->name = info->name; + engine->exec_id = info->exec_id; + engine->hw_id = engine->guc_id = info->hw_id; + 
engine->mmio_base = info->mmio_base; + engine->irq_shift = info->irq_shift; + + return engine; +} + +/** + * intel_engines_init() - allocate, populate and init the Engine Command Streamers + * @dev: DRM device. + * + * Return: non-zero if the initialization failed. + */ +int intel_engines_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_device_info *device_info = mkwrite_device_info(dev_priv); + unsigned int mask = 0; + int (*init)(struct intel_engine_cs *engine); + unsigned int i; + int ret; + + WARN_ON(INTEL_INFO(dev_priv)->ring_mask == 0); + WARN_ON(INTEL_INFO(dev_priv)->ring_mask & + GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); + + for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { + if (!HAS_ENGINE(dev_priv, i)) + continue; + + if (i915.enable_execlists) + init = intel_engines[i].init_execlists; + else + init = intel_engines[i].init_legacy; + + if (!init) + continue; + + ret = init(intel_engine_setup(dev_priv, i)); + if (ret) + goto cleanup; + + mask |= ENGINE_MASK(i); + } + + /* + * Catch failures to update intel_engines table when the new engines + * are added to the driver by a warning and disabling the forgotten + * engines. + */ + if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) + device_info->ring_mask = mask; + + device_info->num_rings = hweight32(mask); + + return 0; + +cleanup: + for (i = 0; i < I915_NUM_ENGINES; i++) { + if (i915.enable_execlists) + intel_logical_ring_cleanup(&dev_priv->engine[i]); + else + intel_engine_cleanup(&dev_priv->engine[i]); + } + + return ret; +} + +void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno) +{ + struct drm_i915_private *dev_priv = engine->i915; + + /* Our semaphore implementation is strictly monotonic (i.e. we proceed + * so long as the semaphore value in the register/page is greater + * than the sync value), so whenever we reset the seqno, + * so long as we reset the tracking semaphore value to 0, it will + * always be before the next request's seqno. If we don't reset + * the semaphore value, then when the seqno moves backwards all + * future waits will complete instantly (causing rendering corruption). + */ + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { + I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); + I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); + if (HAS_VEBOX(dev_priv)) + I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); + } + if (dev_priv->semaphore) { + struct page *page = i915_vma_first_page(dev_priv->semaphore); + void *semaphores; + + /* Semaphores are in noncoherent memory, flush to be safe */ + semaphores = kmap(page); + memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), + 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); + drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), + I915_NUM_ENGINES * gen8_semaphore_seqno_size); + kunmap(page); + } + memset(engine->semaphore.sync_seqno, 0, + sizeof(engine->semaphore.sync_seqno)); + + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + engine->last_submitted_seqno = seqno; + + engine->hangcheck.seqno = seqno; + + /* After manually advancing the seqno, fake the interrupt in case + * there are any waiters for that seqno. 
+ */ + intel_engine_wakeup(engine); +} + +void intel_engine_init_hangcheck(struct intel_engine_cs *engine) +{ + memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); + clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings); +} + +static void intel_engine_init_requests(struct intel_engine_cs *engine) +{ + init_request_active(&engine->last_request, NULL); + INIT_LIST_HEAD(&engine->request_list); +} + +/** + * intel_engine_setup_common - setup engine state not requiring hw access + * @engine: Engine to setup. + * + * Initializes @engine structure members shared between legacy and execlists + * submission modes which do not require hardware access. + * + * Typically done early in the submission mode specific engine setup stage. + */ +void intel_engine_setup_common(struct intel_engine_cs *engine) +{ + INIT_LIST_HEAD(&engine->buffers); + INIT_LIST_HEAD(&engine->execlist_queue); + spin_lock_init(&engine->execlist_lock); + + engine->fence_context = fence_context_alloc(1); + + intel_engine_init_requests(engine); + intel_engine_init_hangcheck(engine); + i915_gem_batch_pool_init(engine, &engine->batch_pool); + + intel_engine_init_cmd_parser(engine); +} + +int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + WARN_ON(engine->scratch); + + obj = i915_gem_object_create_stolen(&engine->i915->drm, size); + if (!obj) + obj = i915_gem_object_create(&engine->i915->drm, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + engine->scratch = vma; + DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) +{ + i915_vma_unpin_and_release(&engine->scratch); +} + +/** + * intel_engine_init_common - initialize engine state which might require hw access + * @engine: Engine to initialize. + * + * Initializes @engine structure members shared between legacy and execlists + * submission modes which do require hardware access. + * + * Typically done at later stages of submission mode specific engine setup. + * + * Returns zero on success or an error code on failure. + */ +int intel_engine_init_common(struct intel_engine_cs *engine) +{ + int ret; + + ret = intel_engine_init_breadcrumbs(engine); + if (ret) + return ret; + + return 0; +} + +/** + * intel_engine_cleanup_common - cleans up the engine state created by + * the common initializers. + * @engine: Engine to cleanup. + * + * This cleans up everything created by the common helpers. 
+ */ +void intel_engine_cleanup_common(struct intel_engine_cs *engine) +{ + intel_engine_cleanup_scratch(engine); + + intel_engine_fini_breadcrumbs(engine); + intel_engine_cleanup_cmd_parser(engine); + i915_gem_batch_pool_fini(&engine->batch_pool); +} diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6a7ad3ed1463..bf8b22ad9aed 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -190,9 +190,13 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) dpfc_ctl |= DPFC_CTL_LIMIT_2X; else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; - I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; + I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + } else { + I915_WRITE(DPFC_FENCE_YOFF, 0); + } /* enable it... */ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -244,21 +248,29 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) dpfc_ctl |= DPFC_CTL_LIMIT_1X; break; } - dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_GEN5(dev_priv)) - dpfc_ctl |= params->fb.fence_reg; + + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= DPFC_CTL_FENCE_EN; + if (IS_GEN5(dev_priv)) + dpfc_ctl |= params->fb.fence_reg; + if (IS_GEN6(dev_priv)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, + params->crtc.fence_y_offset); + } + } else { + if (IS_GEN6(dev_priv)) { + I915_WRITE(SNB_DPFC_CTL_SA, 0); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + } + } I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); /* enable it... 
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - if (IS_GEN6(dev_priv)) { - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); - } - intel_fbc_recompress(dev_priv); } @@ -305,7 +317,15 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) break; } - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + if (params->fb.fence_reg != I915_FENCE_REG_NONE) { + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); + } else { + I915_WRITE(SNB_DPFC_CTL_SA,0); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + } if (dev_priv->fbc.false_color) dpfc_ctl |= FBC_CTL_FALSE_COLOR; @@ -324,10 +344,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); - intel_fbc_recompress(dev_priv); } @@ -494,7 +510,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc, if (!no_fbc_on_multiple_pipes(dev_priv)) return true; - if (plane_state->visible) + if (plane_state->base.visible) fbc->visible_pipes_mask |= (1 << pipe); else fbc->visible_pipes_mask &= ~(1 << pipe); @@ -709,6 +725,14 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } +/* XXX replace me when we have VMA tracking for intel_plane_state */ +static int get_fence_id(struct drm_framebuffer *fb) +{ + struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL); + + return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE; +} + static void intel_fbc_update_state_cache(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -725,9 +749,9 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, ilk_pipe_pixel_rate(crtc_state); cache->plane.rotation = plane_state->base.rotation; - cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16; - cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16; - cache->plane.visible = plane_state->visible; + cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; + cache->plane.visible = plane_state->base.visible; if (!cache->plane.visible) return; @@ -737,11 +761,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, /* FIXME: We lack the proper locking here, so only run this on the * platforms that need. */ if (IS_GEN(dev_priv, 5, 6)) - cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); + cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; - cache->fb.fence_reg = obj->fence_reg; - cache->fb.tiling_mode = obj->tiling_mode; + cache->fb.fence_reg = get_fence_id(fb); + cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); } static bool intel_fbc_can_activate(struct intel_crtc *crtc) @@ -768,14 +792,20 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) /* The use of a CPU fence is mandatory in order to detect writes * by the CPU to the scanout and trigger updates to the FBC. 
+ * + * Note that is possible for a tiled surface to be unmappable (and + * so have no fence associated with it) due to aperture constaints + * at the time of pinning. */ if (cache->fb.tiling_mode != I915_TILING_X || cache->fb.fence_reg == I915_FENCE_REG_NONE) { - fbc->no_fbc_reason = "framebuffer not tiled or fenced"; - return false; + if (INTEL_GEN(dev_priv) < 5) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; + } } if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && - cache->plane.rotation != BIT(DRM_ROTATE_0)) { + cache->plane.rotation != DRM_ROTATE_0) { fbc->no_fbc_reason = "rotation unsupported"; return false; } @@ -1050,7 +1080,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_plane_state *intel_plane_state = to_intel_plane_state(plane_state); - if (!intel_plane_state->visible) + if (!intel_plane_state->base.visible) continue; for_each_crtc_in_state(state, crtc, crtc_state, j) { @@ -1075,6 +1105,8 @@ out: /** * intel_fbc_enable: tries to enable FBC on the CRTC * @crtc: the CRTC + * @crtc_state: corresponding &drm_crtc_state for @crtc + * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc * * This function checks if the given CRTC was chosen for FBC, then enables it if * possible. Notice that it doesn't activate FBC. It is valid to call @@ -1163,11 +1195,8 @@ void intel_fbc_disable(struct intel_crtc *crtc) return; mutex_lock(&fbc->lock); - if (fbc->crtc == crtc) { - WARN_ON(!fbc->enabled); - WARN_ON(fbc->active); + if (fbc->crtc == crtc) __intel_fbc_disable(dev_priv); - } mutex_unlock(&fbc->lock); cancel_work_sync(&fbc->work.work); @@ -1212,7 +1241,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) for_each_intel_crtc(&dev_priv->drm, crtc) if (intel_crtc_active(&crtc->base) && - to_intel_plane_state(crtc->base.primary->state)->visible) + to_intel_plane_state(crtc->base.primary->state)->base.visible) dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); } @@ -1230,12 +1259,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (i915.enable_fbc >= 0) return !!i915.enable_fbc; + if (!HAS_FBC(dev_priv)) + return 0; + if (IS_BROADWELL(dev_priv)) return 1; return 0; } +static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +{ +#ifdef CONFIG_INTEL_IOMMU + /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ + if (intel_iommu_gfx_mapped && + (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { + DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); + return true; + } +#endif + + return false; +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -1253,6 +1299,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->active = false; fbc->work.scheduled = false; + if (need_fbc_vtd_wa(dev_priv)) + mkwrite_device_info(dev_priv)->has_fbc = false; + i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 86b00c6db1a6..4003e4908c09 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -34,7 +34,6 @@ #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/vga_switcheroo.h> @@ -42,6 +41,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include "intel_drv.h" +#include "intel_frontbuffer.h" #include 
<drm/i915_drm.h> #include "i915_drv.h" @@ -159,7 +159,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, fb = __intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) { - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); ret = PTR_ERR(fb); goto out; } @@ -187,9 +187,8 @@ static int intelfb_create(struct drm_fb_helper *helper, struct fb_info *info; struct drm_framebuffer *fb; struct i915_vma *vma; - struct drm_i915_gem_object *obj; bool prealloc = false; - void *vaddr; + void __iomem *vaddr; int ret; if (intel_fb && @@ -215,17 +214,17 @@ static int intelfb_create(struct drm_fb_helper *helper, sizes->fb_height = intel_fb->base.height; } - obj = intel_fb->obj; - mutex_lock(&dev->struct_mutex); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the * BIOS is suitable for own access. */ - ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); - if (ret) + vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto out_unlock; + } info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { @@ -245,13 +244,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; - vma = i915_gem_obj_to_ggtt(obj); - /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = ggtt->mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + vma->node.start; + info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma); info->fix.smem_len = vma->node.size; vaddr = i915_vma_pin_iomap(vma); @@ -273,14 +270,14 @@ static int intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. 
*/ - if (ifbdev->fb->obj->stolen && !prealloc) + if (intel_fb->obj->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08llx, bo %p\n", - fb->width, fb->height, - i915_gem_obj_ggtt_offset(obj), obj); + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n", + fb->width, fb->height, i915_ggtt_offset(vma)); + ifbdev->vma = vma; mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); @@ -289,7 +286,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); + intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -554,7 +551,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) if (ifbdev->fb) { mutex_lock(&ifbdev->helper.dev->struct_mutex); - intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); + intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); mutex_unlock(&ifbdev->helper.dev->struct_mutex); drm_framebuffer_remove(&ifbdev->fb->base); @@ -768,7 +765,7 @@ void intel_fbdev_fini(struct drm_device *dev) if (!ifbdev) return; - flush_work(&dev_priv->fbdev_suspend_work); + cancel_work_sync(&dev_priv->fbdev_suspend_work); if (!current_is_async()) intel_fbdev_sync(ifbdev); @@ -782,7 +779,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; - if (!ifbdev) + if (!ifbdev || !ifbdev->fb) return; info = ifbdev->helper.fbdev; @@ -827,31 +824,28 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous void intel_fbdev_output_poll_changed(struct drm_device *dev) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (dev_priv->fbdev) - drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; + + if (ifbdev && ifbdev->fb) + drm_fb_helper_hotplug_event(&ifbdev->helper); } void intel_fbdev_restore_mode(struct drm_device *dev) { - int ret; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_fbdev *ifbdev = dev_priv->fbdev; - struct drm_fb_helper *fb_helper; + struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; if (!ifbdev) return; intel_fbdev_sync(ifbdev); + if (!ifbdev->fb) + return; - fb_helper = &ifbdev->helper; - - ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); - if (ret) { + if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) { DRM_DEBUG("failed to restore crtc mode\n"); } else { - mutex_lock(&fb_helper->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT); - mutex_unlock(&fb_helper->dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } } diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index ac85357010b4..966de4c7c7a2 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -63,47 +63,30 @@ #include <drm/drmP.h> #include "intel_drv.h" +#include "intel_frontbuffer.h" #include "i915_drv.h" -/** - * intel_fb_obj_invalidate - invalidate frontbuffer object - * @obj: GEM object to invalidate - * @origin: which operation caused the invalidation - * - * This function gets called every time rendering on the given object starts and - * frontbuffer caching (fbc, low refresh rate for DRRS, panel self 
refresh) must - * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed - * until the rendering completes or a flip on this frontbuffer plane is - * scheduled. - */ -void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, - enum fb_op_origin origin) +void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + enum fb_op_origin origin, + unsigned int frontbuffer_bits) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (!obj->frontbuffer_bits) - return; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (origin == ORIGIN_CS) { - mutex_lock(&dev_priv->fb_tracking.lock); - dev_priv->fb_tracking.busy_bits - |= obj->frontbuffer_bits; - dev_priv->fb_tracking.flip_bits - &= ~obj->frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); + dev_priv->fb_tracking.busy_bits |= frontbuffer_bits; + dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; + spin_unlock(&dev_priv->fb_tracking.lock); } - intel_psr_invalidate(dev, obj->frontbuffer_bits); - intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); - intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); + intel_psr_invalidate(dev_priv, frontbuffer_bits); + intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits); + intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin); } /** * intel_frontbuffer_flush - flush frontbuffer - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the flush * @@ -113,64 +96,45 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, * * Can be called without any locks held. */ -static void intel_frontbuffer_flush(struct drm_device *dev, +static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* Delay flushing when rings are still busy.*/ - mutex_lock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_unlock(&dev_priv->fb_tracking.lock); if (!frontbuffer_bits) return; - intel_edp_drrs_flush(dev, frontbuffer_bits); - intel_psr_flush(dev, frontbuffer_bits, origin); + intel_edp_drrs_flush(dev_priv, frontbuffer_bits); + intel_psr_flush(dev_priv, frontbuffer_bits, origin); intel_fbc_flush(dev_priv, frontbuffer_bits, origin); } -/** - * intel_fb_obj_flush - flush frontbuffer object - * @obj: GEM object to flush - * @retire: set when retiring asynchronous rendering - * @origin: which operation caused the flush - * - * This function gets called every time rendering on the given object has - * completed and frontbuffer caching can be started again. If @retire is true - * then any delayed flushes will be unblocked. 
- */ -void intel_fb_obj_flush(struct drm_i915_gem_object *obj, - bool retire, enum fb_op_origin origin) +void __intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire, + enum fb_op_origin origin, + unsigned int frontbuffer_bits) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - unsigned frontbuffer_bits; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (!obj->frontbuffer_bits) - return; - - frontbuffer_bits = obj->frontbuffer_bits; + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); if (retire) { - mutex_lock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); /* Filter out new bits since rendering started. */ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits; - dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_unlock(&dev_priv->fb_tracking.lock); } - intel_frontbuffer_flush(dev, frontbuffer_bits, origin); + if (frontbuffer_bits) + intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin); } /** * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after scheduling a flip on @obj. The actual @@ -180,23 +144,21 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj, * * Can be called without any locks held. */ -void intel_frontbuffer_flip_prepare(struct drm_device *dev, +void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); - - mutex_lock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); dev_priv->fb_tracking.flip_bits |= frontbuffer_bits; /* Remove stale busy bits due to the old buffer. */ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_unlock(&dev_priv->fb_tracking.lock); - intel_psr_single_frame_update(dev, frontbuffer_bits); + intel_psr_single_frame_update(dev_priv, frontbuffer_bits); } /** * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after the flip has been latched and will complete @@ -204,23 +166,23 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev, * * Can be called without any locks held. */ -void intel_frontbuffer_flip_complete(struct drm_device *dev, +void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); - - mutex_lock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); /* Mask any cancelled flips. */ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits; dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_unlock(&dev_priv->fb_tracking.lock); - intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP); + if (frontbuffer_bits) + intel_frontbuffer_flush(dev_priv, + frontbuffer_bits, ORIGIN_FLIP); } /** * intel_frontbuffer_flip - synchronous frontbuffer flip - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * This function gets called after scheduling a flip on @obj. This is for @@ -229,15 +191,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev, * * Can be called without any locks held. 
*/ -void intel_frontbuffer_flip(struct drm_device *dev, +void intel_frontbuffer_flip(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); - - mutex_lock(&dev_priv->fb_tracking.lock); + spin_lock(&dev_priv->fb_tracking.lock); /* Remove stale busy bits due to the old buffer. */ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; - mutex_unlock(&dev_priv->fb_tracking.lock); + spin_unlock(&dev_priv->fb_tracking.lock); - intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP); + intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP); } diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h new file mode 100644 index 000000000000..76ceb539f9f0 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_frontbuffer.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __INTEL_FRONTBUFFER_H__ +#define __INTEL_FRONTBUFFER_H__ + +struct drm_i915_private; +struct drm_i915_gem_object; + +void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv, + unsigned frontbuffer_bits); +void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv, + unsigned frontbuffer_bits); +void intel_frontbuffer_flip(struct drm_i915_private *dev_priv, + unsigned frontbuffer_bits); + +void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + enum fb_op_origin origin, + unsigned int frontbuffer_bits); +void __intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire, + enum fb_op_origin origin, + unsigned int frontbuffer_bits); + +/** + * intel_fb_obj_invalidate - invalidate frontbuffer object + * @obj: GEM object to invalidate + * @origin: which operation caused the invalidation + * + * This function gets called every time rendering on the given object starts and + * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must + * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed + * until the rendering completes or a flip on this frontbuffer plane is + * scheduled. 
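As a rough usage sketch of the tracking contract described above (the caller below is hypothetical and not part of this patch; it assumes the i915 build context for the object type and the ORIGIN_* values): a CPU write to a scanout object is expected to be bracketed by an invalidate before the access and a flush once the write has landed, so PSR/FBC/DRRS can be disabled for the duration and resumed afterwards.

#include "intel_frontbuffer.h"

/* Hypothetical caller: bracket a CPU write through the GTT to a scanout
 * object so the frontbuffer consumers are invalidated first and flushed
 * once the write is complete. */
static void example_write_to_scanout(struct drm_i915_gem_object *obj)
{
	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	/* ... write pixels through the GTT mapping ... */

	/* No GPU work is being retired here, so retire = false. */
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
}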
+ */ +static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, + enum fb_op_origin origin) +{ + unsigned int frontbuffer_bits; + + frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); + if (!frontbuffer_bits) + return; + + __intel_fb_obj_invalidate(obj, origin, frontbuffer_bits); +} + +/** + * intel_fb_obj_flush - flush frontbuffer object + * @obj: GEM object to flush + * @retire: set when retiring asynchronous rendering + * @origin: which operation caused the flush + * + * This function gets called every time rendering on the given object has + * completed and frontbuffer caching can be started again. If @retire is true + * then any delayed flushes will be unblocked. + */ +static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj, + bool retire, + enum fb_op_origin origin) +{ + unsigned int frontbuffer_bits; + + frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); + if (!frontbuffer_bits) + return; + + __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits); +} + +#endif /* __INTEL_FRONTBUFFER_H__ */ diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 3e3e743740c0..c97326269588 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -63,26 +63,25 @@ struct drm_i915_gem_request; * retcode: errno from last guc_submit() */ struct i915_guc_client { - struct drm_i915_gem_object *client_obj; + struct i915_vma *vma; void *client_base; /* first page (only) of above */ struct i915_gem_context *owner; struct intel_guc *guc; + + uint32_t engines; /* bitmap of (host) engine ids */ uint32_t priority; uint32_t ctx_index; - uint32_t proc_desc_offset; + uint32_t doorbell_offset; uint32_t cookie; uint16_t doorbell_id; - uint16_t padding; /* Maintain alignment */ + uint16_t padding[3]; /* Maintain alignment */ uint32_t wq_offset; uint32_t wq_size; uint32_t wq_tail; - uint32_t unused; /* Was 'wq_head' */ - uint32_t no_wq_space; - uint32_t q_fail; /* No longer used */ uint32_t b_fail; int retcode; @@ -125,11 +124,10 @@ struct intel_guc_fw { struct intel_guc { struct intel_guc_fw guc_fw; uint32_t log_flags; - struct drm_i915_gem_object *log_obj; - - struct drm_i915_gem_object *ads_obj; + struct i915_vma *log_vma; - struct drm_i915_gem_object *ctx_pool_obj; + struct i915_vma *ads_vma; + struct i915_vma *ctx_pool_vma; struct ida ctx_ids; struct i915_guc_client *execbuf_client; @@ -160,7 +158,6 @@ extern int intel_guc_resume(struct drm_device *dev); int i915_guc_submission_init(struct drm_i915_private *dev_priv); int i915_guc_submission_enable(struct drm_i915_private *dev_priv); int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); -int i915_guc_submit(struct drm_i915_gem_request *rq); void i915_guc_submission_disable(struct drm_i915_private *dev_priv); void i915_guc_submission_fini(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 944786d7075b..e40db2d2ae99 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -155,6 +155,7 @@ * * +-------------------------------+ * | guc_css_header | + * | | * | contains major/minor version | * +-------------------------------+ * | uCode | @@ -176,10 +177,10 @@ * * 1. Header, uCode and RSA are must-have components. * 2. All firmware components, if they present, are in the sequence illustrated - * in the layout table above. + * in the layout table above. * 3. Length info of each component can be found in header, in dwords. * 4. 
Modulus and exponent key are not required by driver. They may not appear - * in fw. So driver will load a truncated firmware in this case. + * in fw. So driver will load a truncated firmware in this case. */ struct guc_css_header { diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 605c69658d2c..324812d69b70 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,13 +59,25 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin" +#define SKL_FW_MAJOR 6 +#define SKL_FW_MINOR 1 + +#define BXT_FW_MAJOR 8 +#define BXT_FW_MINOR 7 + +#define KBL_FW_MAJOR 9 +#define KBL_FW_MINOR 14 + +#define GUC_FW_PATH(platform, major, minor) \ + "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" + +#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) MODULE_FIRMWARE(I915_SKL_GUC_UCODE); -#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" +#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR) MODULE_FIRMWARE(I915_BXT_GUC_UCODE); -#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin" +#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); /* User-friendly representation of an enum */ @@ -181,16 +193,15 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv) i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } - if (guc->ads_obj) { - u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj) - >> PAGE_SHIFT; + if (guc->ads_vma) { + u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; } /* If GuC submission is enabled, set up additional parameters here */ if (i915.enable_guc_submission) { - u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj); + u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma); u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; pgs >>= PAGE_SHIFT; @@ -238,12 +249,12 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, * Note that GuC needs the CSS header plus uKernel code to be copied by the * DMA engine in one operation, whereas the RSA signature is loaded via MMIO. 
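As a side note on the GUC_FW_PATH() macro introduced above, the standalone sketch below (with __stringify() re-defined locally only so it compiles outside the kernel) shows that the macro reproduces the literal firmware path it replaces.

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define GUC_FW_PATH(platform, major, minor) \
	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"

int main(void)
{
	/* Prints "i915/skl_guc_ver6_1.bin", the same path as the old literal. */
	printf("%s\n", GUC_FW_PATH(skl, 6, 1));
	return 0;
}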
*/ -static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) +static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv, + struct i915_vma *vma) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - struct drm_i915_gem_object *fw_obj = guc_fw->guc_fw_obj; unsigned long offset; - struct sg_table *sg = fw_obj->pages; + struct sg_table *sg = vma->pages; u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT]; int i, ret = 0; @@ -260,7 +271,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); /* Set the source address for the new blob */ - offset = i915_gem_obj_ggtt_offset(fw_obj) + guc_fw->header_offset; + offset = i915_ggtt_offset(vma) + guc_fw->header_offset; I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -315,6 +326,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) { struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct drm_device *dev = &dev_priv->drm; + struct i915_vma *vma; int ret; ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); @@ -323,10 +335,10 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) return ret; } - ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0); - if (ret) { - DRM_DEBUG_DRIVER("pin failed %d\n", ret); - return ret; + vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma)); + return PTR_ERR(vma); } /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */ @@ -349,7 +361,9 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) } /* WaC6DisallowByGfxPause*/ - I915_WRITE(GEN6_GFXPAUSE, 0x30FFF); + if (IS_SKL_REVID(dev, 0, SKL_REVID_C0) || + IS_BXT_REVID(dev, 0, BXT_REVID_B0)) + I915_WRITE(GEN6_GFXPAUSE, 0x30FFF); if (IS_BROXTON(dev)) I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); @@ -367,7 +381,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) set_guc_init_params(dev_priv); - ret = guc_ucode_xfer_dma(dev_priv); + ret = guc_ucode_xfer_dma(dev_priv, vma); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -375,7 +389,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) * We keep the object pages for reuse during resume. But we can unpin it * now that DMA has completed, so it doesn't continue to take up space. 
*/ - i915_gem_object_ggtt_unpin(guc_fw->guc_fw_obj); + i915_vma_unpin(vma); return ret; } @@ -662,7 +676,7 @@ fail: mutex_lock(&dev->struct_mutex); obj = guc_fw->guc_fw_obj; if (obj) - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); guc_fw->guc_fw_obj = NULL; mutex_unlock(&dev->struct_mutex); @@ -695,16 +709,16 @@ void intel_guc_init(struct drm_device *dev) fw_path = NULL; } else if (IS_SKYLAKE(dev)) { fw_path = I915_SKL_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 6; - guc_fw->guc_fw_minor_wanted = 1; + guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR; } else if (IS_BROXTON(dev)) { fw_path = I915_BXT_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 8; - guc_fw->guc_fw_minor_wanted = 7; + guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR; } else if (IS_KABYLAKE(dev)) { fw_path = I915_KBL_GUC_UCODE; - guc_fw->guc_fw_major_wanted = 9; - guc_fw->guc_fw_minor_wanted = 14; + guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR; + guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR; } else { fw_path = ""; /* unknown device */ } @@ -743,7 +757,7 @@ void intel_guc_fini(struct drm_device *dev) i915_guc_submission_fini(dev_priv); if (guc_fw->guc_fw_obj) - drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); + i915_gem_object_put(guc_fw->guc_fw_obj); guc_fw->guc_fw_obj = NULL; mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index f48957ea100d..334d47b5811a 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -477,7 +477,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -void i915_hpd_poll_init_work(struct work_struct *work) { +static void i915_hpd_poll_init_work(struct work_struct *work) +{ struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.poll_init_work); @@ -525,7 +526,6 @@ void i915_hpd_poll_init_work(struct work_struct *work) { /** * intel_hpd_poll_init - enables/disables polling for connectors with hpd * @dev_priv: i915 device instance - * @enabled: Whether to enable or disable polling * * This function enables polling for all connectors, regardless of whether or * not they support hotplug detection. Under certain conditions HPD may not be diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 414ddda43922..6b49df4316f4 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -288,7 +288,6 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) /** * intel_lr_context_descriptor_update() - calculate & cache the descriptor * descriptor for a pinned context - * * @ctx: Context to work on * @engine: Engine the descriptor will be used with * @@ -297,12 +296,13 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * expensive to calculate, we'll just do it once and cache the result, * which remains valid until the context is unpinned. 
* - * This is what a descriptor looks like, from LSB to MSB: - * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) - * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-52: ctx ID, a globally unique tag - * bits 53-54: mbz, reserved for use by hardware - * bits 55-63: group ID, currently unused and set to 0 + * This is what a descriptor looks like, from LSB to MSB:: + * + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 12-31: LRCA, GTT address of (the HWSP of) this context + * bits 32-52: ctx ID, a globally unique tag + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 */ static void intel_lr_context_descriptor_update(struct i915_gem_context *ctx, @@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, desc = ctx->desc_template; /* bits 3-4 */ desc |= engine->ctx_desc_template; /* bits 0-11 */ - desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; + desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; /* bits 12-31 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq) struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state; - reg_state[CTX_RING_TAIL+1] = rq->tail; + reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail); /* True 32b PPGTT with dynamic page allocation: update PDP * registers and point the unallocated PDPs to scratch page. @@ -384,8 +384,8 @@ static void execlists_update_context(struct drm_i915_gem_request *rq) execlists_update_context_pdps(ppgtt, reg_state); } -static void execlists_submit_requests(struct drm_i915_gem_request *rq0, - struct drm_i915_gem_request *rq1) +static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0, + struct drm_i915_gem_request *rq1) { struct drm_i915_private *dev_priv = rq0->i915; unsigned int fw_domains = rq0->engine->fw_domains; @@ -418,7 +418,7 @@ static inline void execlists_context_status_change( atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); } -static void execlists_context_unqueue(struct intel_engine_cs *engine) +static void execlists_unqueue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; struct drm_i915_gem_request *cursor, *tmp; @@ -441,7 +441,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; list_del(&req0->execlist_link); - i915_gem_request_unreference(req0); + i915_gem_request_put(req0); req0 = cursor; } else { if (IS_ENABLED(CONFIG_DRM_I915_GVT)) { @@ -482,14 +482,11 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * resubmit the request. See gen8_emit_request() for where we * prepare the padding after the end of the request. 
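To make the descriptor layout documented above for intel_lr_context_descriptor_update() concrete, here is a minimal illustrative packer; the function name, standalone types and mask values are assumptions for the sketch, with the shift of 32 taken from the documented ctx-ID field (bits 32-52).

#include <stdint.h>

/* Illustrative only: pack flags (bits 0-11), a page-aligned LRCA
 * (bits 12-31) and a context ID (bits 32-52) into one descriptor. */
static uint64_t example_pack_lrc_descriptor(uint32_t flags, uint32_t lrca,
					    uint32_t ctx_id)
{
	uint64_t desc;

	desc  = flags & 0xfffu;			/* bits 0-11 */
	desc |= lrca & 0xfffff000u;		/* bits 12-31, page aligned */
	desc |= (uint64_t)ctx_id << 32;		/* bits 32-52 */

	return desc;
}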
*/ - struct intel_ringbuffer *ringbuf; - - ringbuf = req0->ctx->engine[engine->id].ringbuf; req0->tail += 8; - req0->tail &= ringbuf->size - 1; + req0->tail &= req0->ring->size - 1; } - execlists_submit_requests(req0, req1); + execlists_elsp_submit_contexts(req0, req1); } static unsigned int @@ -514,7 +511,7 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id) execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT); list_del(&head_req->execlist_link); - i915_gem_request_unreference(head_req); + i915_gem_request_put(head_req); return 1; } @@ -539,10 +536,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, return status; } -/** - * intel_lrc_irq_handler() - handle Context Switch interrupts - * @data: tasklet handler passed in unsigned long - * +/* * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly. */ @@ -603,7 +597,7 @@ static void intel_lrc_irq_handler(unsigned long data) if (submit_contexts) { if (!engine->disable_lite_restore_wa || (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE)) - execlists_context_unqueue(engine); + execlists_unqueue(engine); } spin_unlock(&engine->execlist_lock); @@ -612,7 +606,7 @@ static void intel_lrc_irq_handler(unsigned long data) DRM_ERROR("More than two context complete events?\n"); } -static void execlists_context_queue(struct drm_i915_gem_request *request) +static void execlists_submit_request(struct drm_i915_gem_request *request) { struct intel_engine_cs *engine = request->engine; struct drm_i915_gem_request *cursor; @@ -635,70 +629,19 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); list_del(&tail_req->execlist_link); - i915_gem_request_unreference(tail_req); + i915_gem_request_put(tail_req); } } - i915_gem_request_reference(request); + i915_gem_request_get(request); list_add_tail(&request->execlist_link, &engine->execlist_queue); request->ctx_hw_id = request->ctx->hw_id; if (num_elements == 0) - execlists_context_unqueue(engine); + execlists_unqueue(engine); spin_unlock_bh(&engine->execlist_lock); } -static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - uint32_t flush_domains; - int ret; - - flush_domains = 0; - if (engine->gpu_caches_dirty) - flush_domains = I915_GEM_GPU_DOMAINS; - - ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); - if (ret) - return ret; - - engine->gpu_caches_dirty = false; - return 0; -} - -static int execlists_move_to_gpu(struct drm_i915_gem_request *req, - struct list_head *vmas) -{ - const unsigned other_rings = ~intel_engine_flag(req->engine); - struct i915_vma *vma; - uint32_t flush_domains = 0; - bool flush_chipset = false; - int ret; - - list_for_each_entry(vma, vmas, exec_list) { - struct drm_i915_gem_object *obj = vma->obj; - - if (obj->active & other_rings) { - ret = i915_gem_object_sync(obj, req->engine, &req); - if (ret) - return ret; - } - - if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - flush_chipset |= i915_gem_clflush_object(obj, false); - - flush_domains |= obj->base.write_domain; - } - - if (flush_domains & I915_GEM_DOMAIN_GTT) - wmb(); - - /* Unconditionally invalidate gpu caches and ensure that we do flush - * any residual writes from the previous batch. 
- */ - return logical_ring_invalidate_all_caches(req); -} - int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { struct intel_engine_cs *engine = request->engine; @@ -717,7 +660,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request return ret; } - request->ringbuf = ce->ringbuf; + request->ring = ce->ring; if (i915.enable_guc_submission) { /* @@ -762,7 +705,7 @@ err_unpin: } /* - * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload + * intel_logical_ring_advance() - advance the tail and prepare for submission * @request: Request to advance the logical ringbuffer of. * * The tail is updated in our logical ringbuffer struct, not in the actual context. What @@ -771,13 +714,13 @@ err_unpin: * point, the tail *inside* the context is updated and the ELSP written to. */ static int -intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) +intel_logical_ring_advance(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; + struct intel_ring *ring = request->ring; struct intel_engine_cs *engine = request->engine; - intel_logical_ring_advance(ringbuf); - request->tail = ringbuf->tail; + intel_ring_advance(ring); + request->tail = ring->tail; /* * Here we add two extra NOOPs as padding to avoid @@ -785,9 +728,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) * * Caller must reserve WA_TAIL_DWORDS for us! */ - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* We keep the previous context alive until we retire the following * request. This ensures that any the context object is still pinned @@ -797,100 +740,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) */ request->previous_context = engine->last_context; engine->last_context = request->ctx; - - if (i915.enable_guc_submission) - i915_guc_submit(request); - else - execlists_context_queue(request); - - return 0; -} - -/** - * execlists_submission() - submit a batchbuffer for execution, Execlists style - * @params: execbuffer call parameters. - * @args: execbuffer call arguments. - * @vmas: list of vmas. - * - * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts - * away the submission details of the execbuffer ioctl call. - * - * Return: non-zero if the submission fails. 
- */ -int intel_execlists_submission(struct i915_execbuffer_params *params, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas) -{ - struct drm_device *dev = params->dev; - struct intel_engine_cs *engine = params->engine; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; - u64 exec_start; - int instp_mode; - u32 instp_mask; - int ret; - - instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; - instp_mask = I915_EXEC_CONSTANTS_MASK; - switch (instp_mode) { - case I915_EXEC_CONSTANTS_REL_GENERAL: - case I915_EXEC_CONSTANTS_ABSOLUTE: - case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { - DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); - return -EINVAL; - } - - if (instp_mode != dev_priv->relative_constants_mode) { - if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { - DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); - return -EINVAL; - } - - /* The HW changed the meaning on this bit on gen6 */ - instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; - } - break; - default: - DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); - return -EINVAL; - } - - if (args->flags & I915_EXEC_GEN7_SOL_RESET) { - DRM_DEBUG("sol reset is gen7 only\n"); - return -EINVAL; - } - - ret = execlists_move_to_gpu(params->request, vmas); - if (ret) - return ret; - - if (engine == &dev_priv->engine[RCS] && - instp_mode != dev_priv->relative_constants_mode) { - ret = intel_ring_begin(params->request, 4); - if (ret) - return ret; - - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1)); - intel_logical_ring_emit_reg(ringbuf, INSTPM); - intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode); - intel_logical_ring_advance(ringbuf); - - dev_priv->relative_constants_mode = instp_mode; - } - - exec_start = params->batch_obj_vm_offset + - args->batch_start_offset; - - ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags); - if (ret) - return ret; - - trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); - - i915_gem_execbuffer_move_to_active(vmas, params->request); - return 0; } @@ -907,55 +756,13 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) { list_del(&req->execlist_link); - i915_gem_request_unreference(req); + i915_gem_request_put(req); } } -void intel_logical_ring_stop(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - int ret; - - if (!intel_engine_initialized(engine)) - return; - - ret = intel_engine_idle(engine); - if (ret) - DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", - engine->name, ret); - - /* TODO: Is this correct with Execlists enabled? 
*/ - I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (intel_wait_for_register(dev_priv, - RING_MI_MODE(engine->mmio_base), - MODE_IDLE, MODE_IDLE, - 1000)) { - DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); - return; - } - I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); -} - -int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - int ret; - - if (!engine->gpu_caches_dirty) - return 0; - - ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); - if (ret) - return ret; - - engine->gpu_caches_dirty = false; - return 0; -} - static int intel_lr_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ctx->i915; struct intel_context *ce = &ctx->engine[engine->id]; void *vaddr; u32 *lrc_reg_state; @@ -966,41 +773,43 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, if (ce->pin_count++) return 0; - ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, + PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL); if (ret) goto err; - vaddr = i915_gem_object_pin_map(ce->state); + vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - goto unpin_ctx_obj; + goto unpin_vma; } lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf); + ret = intel_ring_pin(ce->ring); if (ret) goto unpin_map; - i915_gem_context_reference(ctx); - ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state); intel_lr_context_descriptor_update(ctx, engine); - lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start; + lrc_reg_state[CTX_RING_BUFFER_START+1] = + i915_ggtt_offset(ce->ring->vma); ce->lrc_reg_state = lrc_reg_state; - ce->state->dirty = true; + ce->state->obj->dirty = true; /* Invalidate GuC TLB. 
*/ - if (i915.enable_guc_submission) + if (i915.enable_guc_submission) { + struct drm_i915_private *dev_priv = ctx->i915; I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); + } + i915_gem_context_get(ctx); return 0; unpin_map: - i915_gem_object_unpin_map(ce->state); -unpin_ctx_obj: - i915_gem_object_ggtt_unpin(ce->state); + i915_gem_object_unpin_map(ce->state->obj); +unpin_vma: + __i915_vma_unpin(ce->state); err: ce->pin_count = 0; return ret; @@ -1017,30 +826,24 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx, if (--ce->pin_count) return; - intel_unpin_ringbuffer_obj(ce->ringbuf); - - i915_gem_object_unpin_map(ce->state); - i915_gem_object_ggtt_unpin(ce->state); + intel_ring_unpin(ce->ring); - ce->lrc_vma = NULL; - ce->lrc_desc = 0; - ce->lrc_reg_state = NULL; + i915_gem_object_unpin_map(ce->state->obj); + i915_vma_unpin(ce->state); - i915_gem_context_unreference(ctx); + i915_gem_context_put(ctx); } static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) { int ret, i; - struct intel_engine_cs *engine = req->engine; - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ring *ring = req->ring; struct i915_workarounds *w = &req->i915->workarounds; if (w->count == 0) return 0; - engine->gpu_caches_dirty = true; - ret = logical_ring_flush_all_caches(req); + ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; @@ -1048,17 +851,16 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr); - intel_logical_ring_emit(ringbuf, w->reg[i].value); + intel_ring_emit_reg(ring, w->reg[i].addr); + intel_ring_emit(ring, w->reg[i].value); } - intel_logical_ring_emit(ringbuf, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_advance(ring); - engine->gpu_caches_dirty = true; - ret = logical_ring_flush_all_caches(req); + ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; @@ -1094,7 +896,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) * code duplication. 
*/ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, - uint32_t *const batch, + uint32_t *batch, uint32_t index) { struct drm_i915_private *dev_priv = engine->i915; @@ -1113,7 +915,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); @@ -1131,7 +933,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256); wa_ctx_emit(batch, index, 0); return index; @@ -1156,37 +958,24 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, return 0; } -/** - * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA - * - * @engine: only applicable for RCS - * @wa_ctx: structure representing wa_ctx - * offset: specifies start of the batch, should be cache-aligned. This is updated - * with the offset value received as input. - * size: size of the batch in DWORDS but HW expects in terms of cachelines - * @batch: page in which WA are loaded - * @offset: This field specifies the start of the batch, it should be - * cache-aligned otherwise it is adjusted accordingly. - * Typically we only have one indirect_ctx and per_ctx batch buffer which are - * initialized at the beginning and shared across all contexts but this field - * helps us to have multiple batches at different offsets and select them based - * on a criteria. At the moment this batch always start at the beginning of the page - * and at this point we don't have multiple wa_ctx batch buffers. - * - * The number of WA applied are not known at the beginning; we use this field - * to return the no of DWORDS written. +/* + * Typically we only have one indirect_ctx and per_ctx batch buffer which are + * initialized at the beginning and shared across all contexts but this field + * helps us to have multiple batches at different offsets and select them based + * on a criteria. At the moment this batch always start at the beginning of the page + * and at this point we don't have multiple wa_ctx batch buffers. * - * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END - * so it adds NOOPs as padding to make it cacheline aligned. - * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together - * makes a complete batch buffer. + * The number of WA applied are not known at the beginning; we use this field + * to return the no of DWORDS written. * - * Return: non-zero if we exceed the PAGE_SIZE limit. + * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END + * so it adds NOOPs as padding to make it cacheline aligned. + * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together + * makes a complete batch buffer. 
*/ - static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, - uint32_t *const batch, + uint32_t *batch, uint32_t *offset) { uint32_t scratch_addr; @@ -1205,7 +994,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ /* Actual scratch location is at 128 bytes offset */ - scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | @@ -1230,26 +1019,18 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); } -/** - * gen8_init_perctx_bb() - initialize per ctx batch with WA - * - * @engine: only applicable for RCS - * @wa_ctx: structure representing wa_ctx - * offset: specifies start of the batch, should be cache-aligned. - * size: size of the batch in DWORDS but HW expects in terms of cachelines - * @batch: page in which WA are loaded - * @offset: This field specifies the start of this batch. - * This batch is started immediately after indirect_ctx batch. Since we ensure - * that indirect_ctx ends on a cacheline this batch is aligned automatically. +/* + * This batch is started immediately after indirect_ctx batch. Since we ensure + * that indirect_ctx ends on a cacheline this batch is aligned automatically. * - * The number of DWORDS written are returned using this field. + * The number of DWORDS written are returned using this field. * * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. 
*/ static int gen8_init_perctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, - uint32_t *const batch, + uint32_t *batch, uint32_t *offset) { uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); @@ -1264,7 +1045,7 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *engine, static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, - uint32_t *const batch, + uint32_t *batch, uint32_t *offset) { int ret; @@ -1282,11 +1063,18 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, return ret; index = ret; + /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */ + wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); + wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2); + wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE( + GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE)); + wa_ctx_emit(batch, index, MI_NOOP); + /* WaClearSlmSpaceAtContextSwitch:kbl */ /* Actual scratch location is at 128 bytes offset */ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) { - uint32_t scratch_addr - = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; + u32 scratch_addr = + i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | @@ -1332,7 +1120,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, static int gen9_init_perctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, - uint32_t *const batch, + uint32_t *batch, uint32_t *offset) { uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); @@ -1378,44 +1166,44 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { - int ret; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; - engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm, - PAGE_ALIGN(size)); - if (IS_ERR(engine->wa_ctx.obj)) { - DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); - ret = PTR_ERR(engine->wa_ctx.obj); - engine->wa_ctx.obj = NULL; - return ret; - } + obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size)); + if (IS_ERR(obj)) + return PTR_ERR(obj); - ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); - if (ret) { - DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", - ret); - drm_gem_object_unreference(&engine->wa_ctx.obj->base); - return ret; + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; } + err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err; + + engine->wa_ctx.vma = vma; return 0; + +err: + i915_gem_object_put(obj); + return err; } static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) { - if (engine->wa_ctx.obj) { - i915_gem_object_ggtt_unpin(engine->wa_ctx.obj); - drm_gem_object_unreference(&engine->wa_ctx.obj->base); - engine->wa_ctx.obj = NULL; - } + i915_vma_unpin_and_release(&engine->wa_ctx.vma); } static int intel_init_workaround_bb(struct intel_engine_cs *engine) { - int ret; + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; uint32_t *batch; uint32_t offset; struct page *page; - struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; + int ret; WARN_ON(engine->id != RCS); @@ -1427,7 +1215,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) } /* some WA perform writes to scratch page, ensure it is valid */ - if (engine->scratch.obj == NULL) { + if 
(!engine->scratch) { DRM_ERROR("scratch page not allocated for %s\n", engine->name); return -EINVAL; } @@ -1438,7 +1226,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } - page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0); + page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); batch = kmap_atomic(page); offset = 0; @@ -1485,7 +1273,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), - (u32)engine->status_page.gfx_addr); + engine->status_page.ggtt_offset); POSTING_READ(RING_HWS_PGA(engine->mmio_base)); } @@ -1572,8 +1360,8 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) { struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; + struct intel_ring *ring = req->ring; struct intel_engine_cs *engine = req->engine; - struct intel_ringbuffer *ringbuf = req->ringbuf; const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; int i, ret; @@ -1581,28 +1369,27 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) if (ret) return ret; - intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds)); for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - intel_logical_ring_emit_reg(ringbuf, - GEN8_RING_PDP_UDW(engine, i)); - intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); - intel_logical_ring_emit_reg(ringbuf, - GEN8_RING_PDP_LDW(engine, i)); - intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i)); + intel_ring_emit(ring, upper_32_bits(pd_daddr)); + intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i)); + intel_ring_emit(ring, lower_32_bits(pd_daddr)); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } static int gen8_emit_bb_start(struct drm_i915_gem_request *req, - u64 offset, unsigned dispatch_flags) + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ring *ring = req->ring; bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -1629,14 +1416,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return ret; /* FIXME(BDW): Address space and security selectors. */ - intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | - (ppgtt<<8) | - (dispatch_flags & I915_DISPATCH_RS ? - MI_BATCH_RESOURCE_STREAMER : 0)); - intel_logical_ring_emit(ringbuf, lower_32_bits(offset)); - intel_logical_ring_emit(ringbuf, upper_32_bits(offset)); - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | + (ppgtt<<8) | + (dispatch_flags & I915_DISPATCH_RS ? 
+ MI_BATCH_RESOURCE_STREAMER : 0)); + intel_ring_emit(ring, lower_32_bits(offset)); + intel_ring_emit(ring, upper_32_bits(offset)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1655,14 +1442,10 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) I915_WRITE_IMR(engine, ~engine->irq_keep_mask); } -static int gen8_emit_flush(struct drm_i915_gem_request *request, - u32 invalidate_domains, - u32 unused) +static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *engine = ringbuf->engine; - struct drm_i915_private *dev_priv = request->i915; - uint32_t cmd; + struct intel_ring *ring = request->ring; + u32 cmd; int ret; ret = intel_ring_begin(request, 4); @@ -1678,30 +1461,30 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, */ cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; - if (invalidate_domains & I915_GEM_GPU_DOMAINS) { + if (mode & EMIT_INVALIDATE) { cmd |= MI_INVALIDATE_TLB; - if (engine == &dev_priv->engine[VCS]) + if (request->engine->id == VCS) cmd |= MI_INVALIDATE_BSD; } - intel_logical_ring_emit(ringbuf, cmd); - intel_logical_ring_emit(ringbuf, - I915_GEM_HWS_SCRATCH_ADDR | - MI_FLUSH_DW_USE_GTT); - intel_logical_ring_emit(ringbuf, 0); /* upper addr */ - intel_logical_ring_emit(ringbuf, 0); /* value */ - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, + I915_GEM_HWS_SCRATCH_ADDR | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ + intel_ring_advance(ring); return 0; } static int gen8_emit_flush_render(struct drm_i915_gem_request *request, - u32 invalidate_domains, - u32 flush_domains) + u32 mode) { - struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *engine = ringbuf->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_ring *ring = request->ring; + struct intel_engine_cs *engine = request->engine; + u32 scratch_addr = + i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES; bool vf_flush_wa = false, dc_flush_wa = false; u32 flags = 0; int ret; @@ -1709,14 +1492,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, flags |= PIPE_CONTROL_CS_STALL; - if (flush_domains) { + if (mode & EMIT_FLUSH) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } - if (invalidate_domains) { + if (mode & EMIT_INVALIDATE) { flags |= PIPE_CONTROL_TLB_INVALIDATE; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; @@ -1751,40 +1534,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return ret; if (vf_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } if (dc_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE); - intel_logical_ring_emit(ringbuf, 0); - 
intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, flags); - intel_logical_ring_emit(ringbuf, scratch_addr); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); if (dc_flush_wa) { - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } - intel_logical_ring_advance(ringbuf); + intel_ring_advance(ring); return 0; } @@ -1813,7 +1596,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) static int gen8_emit_request(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; + struct intel_ring *ring = request->ring; int ret; ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); @@ -1823,21 +1606,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - intel_logical_ring_emit(ringbuf, - (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); - intel_logical_ring_emit(ringbuf, - intel_hws_seqno_address(request->engine) | - MI_FLUSH_DW_USE_GTT); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, request->seqno); - intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); - intel_logical_ring_emit(ringbuf, MI_NOOP); - return intel_logical_ring_advance_and_submit(request); + intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(ring, + intel_hws_seqno_address(request->engine) | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, request->fence.seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); + return intel_logical_ring_advance(request); } static int gen8_emit_request_render(struct drm_i915_gem_request *request) { - struct intel_ringbuffer *ringbuf = request->ringbuf; + struct intel_ring *ring = request->ring; int ret; ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); @@ -1851,50 +1633,19 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) * need a prior CS_STALL, which is emitted by the flush * following the batch. 
*/ - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); - intel_logical_ring_emit(ringbuf, - (PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, - intel_hws_seqno_address(request->engine)); - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, + (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(ring, intel_hws_seqno_address(request->engine)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, i915_gem_request_get_seqno(request)); /* We're thrashing one dword of HWS. */ - intel_logical_ring_emit(ringbuf, 0); - intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); - intel_logical_ring_emit(ringbuf, MI_NOOP); - return intel_logical_ring_advance_and_submit(request); -} - -static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) -{ - struct render_state so; - int ret; - - ret = i915_gem_render_state_prepare(req->engine, &so); - if (ret) - return ret; - - if (so.rodata == NULL) - return 0; - - ret = req->engine->emit_bb_start(req, so.ggtt_offset, - I915_DISPATCH_SECURE); - if (ret) - goto out; - - ret = req->engine->emit_bb_start(req, - (so.ggtt_offset + so.aux_batch_offset), - I915_DISPATCH_SECURE); - if (ret) - goto out; - - i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req); - -out: - i915_gem_render_state_fini(&so); - return ret; + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); + return intel_logical_ring_advance(request); } static int gen8_init_rcs_context(struct drm_i915_gem_request *req) @@ -1913,14 +1664,12 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req) if (ret) DRM_ERROR("MOCS failed to program: expect performance issues.\n"); - return intel_lr_context_render_state_init(req); + return i915_gem_render_state_init(req); } /** * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer - * * @engine: Engine Command Streamer. - * */ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) { @@ -1939,21 +1688,17 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) dev_priv = engine->i915; if (engine->buffer) { - intel_logical_ring_stop(engine); WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); } if (engine->cleanup) engine->cleanup(engine); - i915_cmd_parser_fini_ring(engine); - i915_gem_batch_pool_fini(&engine->batch_pool); + intel_engine_cleanup_common(engine); - intel_engine_fini_breadcrumbs(engine); - - if (engine->status_page.obj) { - i915_gem_object_unpin_map(engine->status_page.obj); - engine->status_page.obj = NULL; + if (engine->status_page.vma) { + i915_gem_object_unpin_map(engine->status_page.vma->obj); + engine->status_page.vma = NULL; } intel_lr_context_unpin(dev_priv->kernel_context, engine); @@ -1965,13 +1710,23 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) engine->i915 = NULL; } +void intel_execlists_enable_submission(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + + for_each_engine(engine, dev_priv) + engine->submit_request = execlists_submit_request; +} + static void logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. 
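intel_execlists_enable_submission() above swaps the submit_request hook on every engine, and logical_ring_default_vfuncs() continuing below installs defaults that engine-specific init can later override (the render ring replaces emit_flush, for example). A tiny sketch of that defaults-then-override pattern, using hypothetical types and function names rather than the driver's own:

#include <stdio.h>

struct engine {
	const char *name;
	void (*emit_flush)(struct engine *e);
};

static void default_flush(struct engine *e) { printf("%s: default flush\n", e->name); }
static void render_flush(struct engine *e)  { printf("%s: render flush\n", e->name); }

static void set_default_vfuncs(struct engine *e)
{
	e->emit_flush = default_flush;	/* every engine starts with this */
}

int main(void)
{
	struct engine rcs = { .name = "rcs" }, vcs = { .name = "vcs" };

	set_default_vfuncs(&rcs);
	set_default_vfuncs(&vcs);
	rcs.emit_flush = render_flush;	/* render engine overrides the default */

	rcs.emit_flush(&rcs);
	vcs.emit_flush(&vcs);
	return 0;
}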
*/ engine->init_hw = gen8_init_common_ring; - engine->emit_request = gen8_emit_request; engine->emit_flush = gen8_emit_flush; + engine->emit_request = gen8_emit_request; + engine->submit_request = execlists_submit_request; + engine->irq_enable = gen8_logical_ring_enable_irq; engine->irq_disable = gen8_logical_ring_disable_irq; engine->emit_bb_start = gen8_emit_bb_start; @@ -1980,41 +1735,71 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) } static inline void -logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) +logical_ring_default_irqs(struct intel_engine_cs *engine) { + unsigned shift = engine->irq_shift; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } static int -lrc_setup_hws(struct intel_engine_cs *engine, - struct drm_i915_gem_object *dctx_obj) +lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) { + const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; void *hws; /* The HWSP is part of the default context object in LRC mode. */ - engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) + - LRC_PPHWSP_PN * PAGE_SIZE; - hws = i915_gem_object_pin_map(dctx_obj); + hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); if (IS_ERR(hws)) return PTR_ERR(hws); - engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE; - engine->status_page.obj = dctx_obj; + + engine->status_page.page_addr = hws + hws_offset; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; + engine->status_page.vma = vma; return 0; } +static void +logical_ring_setup(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + enum forcewake_domains fw_domains; + + intel_engine_setup_common(engine); + + /* Intentionally left blank. 
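The setup code continuing below ORs the forcewake requirements for the ELSP and context-status registers into a single engine->fw_domains mask, so the submission path can grab all of them at once. A toy sketch of that bitmask accumulation, with made-up domain values and a made-up register-to-domain mapping standing in for enum forcewake_domains and intel_uncore_forcewake_for_reg():

#include <stdio.h>

/* hypothetical stand-ins for the driver's forcewake domain bits */
#define FW_DOMAIN_RENDER (1u << 0)
#define FW_DOMAIN_MEDIA  (1u << 2)

static unsigned int fw_domains_for_reg(unsigned int reg)
{
	/* pretend each register range maps to one domain */
	return reg < 0x3000 ? FW_DOMAIN_RENDER : FW_DOMAIN_MEDIA;
}

int main(void)
{
	unsigned int fw_domains = 0;

	fw_domains |= fw_domains_for_reg(0x2230);	/* e.g. an ELSP-like register */
	fw_domains |= fw_domains_for_reg(0x1c230);	/* e.g. a media-range register */

	printf("combined mask: %#x\n", fw_domains);
	return 0;
}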
*/ + engine->buffer = NULL; + + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, + RING_ELSP(engine), + FW_REG_WRITE); + + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_PTR(engine), + FW_REG_READ | FW_REG_WRITE); + + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_BUF_BASE(engine), + FW_REG_READ); + + engine->fw_domains = fw_domains; + + tasklet_init(&engine->irq_tasklet, + intel_lrc_irq_handler, (unsigned long)engine); + + logical_ring_init_platform_invariants(engine); + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine); +} + static int logical_ring_init(struct intel_engine_cs *engine) { struct i915_gem_context *dctx = engine->i915->kernel_context; int ret; - ret = intel_engine_init_breadcrumbs(engine); - if (ret) - goto error; - - ret = i915_cmd_parser_init_ring(engine); + ret = intel_engine_init_common(engine); if (ret) goto error; @@ -2044,11 +1829,13 @@ error: return ret; } -static int logical_render_ring_init(struct intel_engine_cs *engine) +int logical_render_ring_init(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; int ret; + logical_ring_setup(engine); + if (HAS_L3_DPF(dev_priv)) engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; @@ -2058,11 +1845,10 @@ static int logical_render_ring_init(struct intel_engine_cs *engine) else engine->init_hw = gen8_init_render_ring; engine->init_context = gen8_init_rcs_context; - engine->cleanup = intel_fini_pipe_control; engine->emit_flush = gen8_emit_flush_render; engine->emit_request = gen8_emit_request_render; - ret = intel_init_pipe_control(engine, 4096); + ret = intel_engine_create_scratch(engine, 4096); if (ret) return ret; @@ -2085,160 +1871,11 @@ static int logical_render_ring_init(struct intel_engine_cs *engine) return ret; } -static const struct logical_ring_info { - const char *name; - unsigned exec_id; - unsigned guc_id; - u32 mmio_base; - unsigned irq_shift; - int (*init)(struct intel_engine_cs *engine); -} logical_rings[] = { - [RCS] = { - .name = "render ring", - .exec_id = I915_EXEC_RENDER, - .guc_id = GUC_RENDER_ENGINE, - .mmio_base = RENDER_RING_BASE, - .irq_shift = GEN8_RCS_IRQ_SHIFT, - .init = logical_render_ring_init, - }, - [BCS] = { - .name = "blitter ring", - .exec_id = I915_EXEC_BLT, - .guc_id = GUC_BLITTER_ENGINE, - .mmio_base = BLT_RING_BASE, - .irq_shift = GEN8_BCS_IRQ_SHIFT, - .init = logical_ring_init, - }, - [VCS] = { - .name = "bsd ring", - .exec_id = I915_EXEC_BSD, - .guc_id = GUC_VIDEO_ENGINE, - .mmio_base = GEN6_BSD_RING_BASE, - .irq_shift = GEN8_VCS1_IRQ_SHIFT, - .init = logical_ring_init, - }, - [VCS2] = { - .name = "bsd2 ring", - .exec_id = I915_EXEC_BSD, - .guc_id = GUC_VIDEO_ENGINE2, - .mmio_base = GEN8_BSD2_RING_BASE, - .irq_shift = GEN8_VCS2_IRQ_SHIFT, - .init = logical_ring_init, - }, - [VECS] = { - .name = "video enhancement ring", - .exec_id = I915_EXEC_VEBOX, - .guc_id = GUC_VIDEOENHANCE_ENGINE, - .mmio_base = VEBOX_RING_BASE, - .irq_shift = GEN8_VECS_IRQ_SHIFT, - .init = logical_ring_init, - }, -}; - -static struct intel_engine_cs * -logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id) +int logical_xcs_ring_init(struct intel_engine_cs *engine) { - const struct logical_ring_info *info = &logical_rings[id]; - struct intel_engine_cs *engine = &dev_priv->engine[id]; - enum forcewake_domains fw_domains; - - engine->id = id; - engine->name = info->name; - engine->exec_id = info->exec_id; - engine->guc_id = info->guc_id; - engine->mmio_base = 
info->mmio_base; - - engine->i915 = dev_priv; + logical_ring_setup(engine); - /* Intentionally left blank. */ - engine->buffer = NULL; - - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, - RING_ELSP(engine), - FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_PTR(engine), - FW_REG_READ | FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_BUF_BASE(engine), - FW_REG_READ); - - engine->fw_domains = fw_domains; - - INIT_LIST_HEAD(&engine->active_list); - INIT_LIST_HEAD(&engine->request_list); - INIT_LIST_HEAD(&engine->buffers); - INIT_LIST_HEAD(&engine->execlist_queue); - spin_lock_init(&engine->execlist_lock); - - tasklet_init(&engine->irq_tasklet, - intel_lrc_irq_handler, (unsigned long)engine); - - logical_ring_init_platform_invariants(engine); - logical_ring_default_vfuncs(engine); - logical_ring_default_irqs(engine, info->irq_shift); - - intel_engine_init_hangcheck(engine); - i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool); - - return engine; -} - -/** - * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers - * @dev: DRM device. - * - * This function inits the engines for an Execlists submission style (the - * equivalent in the legacy ringbuffer submission world would be - * i915_gem_init_engines). It does it only for those engines that are present in - * the hardware. - * - * Return: non-zero if the initialization failed. - */ -int intel_logical_rings_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - unsigned int mask = 0; - unsigned int i; - int ret; - - WARN_ON(INTEL_INFO(dev_priv)->ring_mask & - GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); - - for (i = 0; i < ARRAY_SIZE(logical_rings); i++) { - if (!HAS_ENGINE(dev_priv, i)) - continue; - - if (!logical_rings[i].init) - continue; - - ret = logical_rings[i].init(logical_ring_setup(dev_priv, i)); - if (ret) - goto cleanup; - - mask |= ENGINE_MASK(i); - } - - /* - * Catch failures to update logical_rings table when the new engines - * are added to the driver by a warning and disabling the forgotten - * engines. - */ - if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) { - struct intel_device_info *info = - (struct intel_device_info *)&dev_priv->info; - info->ring_mask = mask; - } - - return 0; - -cleanup: - for (i = 0; i < I915_NUM_ENGINES; i++) - intel_logical_ring_cleanup(&dev_priv->engine[i]); - - return ret; + return logical_ring_init(engine); } static u32 @@ -2309,7 +1946,7 @@ static int populate_lr_context(struct i915_gem_context *ctx, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, - struct intel_ringbuffer *ringbuf) + struct intel_ring *ring) { struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; @@ -2326,7 +1963,7 @@ populate_lr_context(struct i915_gem_context *ctx, return ret; } - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); DRM_DEBUG_DRIVER("Could not map object pages! 
(%d)\n", ret); @@ -2362,7 +1999,7 @@ populate_lr_context(struct i915_gem_context *ctx, RING_START(engine->mmio_base), 0); ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(engine->mmio_base), - ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(engine->mmio_base), 0); ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, @@ -2383,9 +2020,9 @@ populate_lr_context(struct i915_gem_context *ctx, RING_INDIRECT_CTX(engine->mmio_base), 0); ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0); - if (engine->wa_ctx.obj) { + if (engine->wa_ctx.vma) { struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); + u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); reg_state[CTX_RCS_INDIRECT_CTX+1] = (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) | @@ -2484,26 +2121,14 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) return ret; } -/** - * execlists_context_deferred_alloc() - create the LRC specific bits of a context - * @ctx: LR context to create. - * @engine: engine to be used with the context. - * - * This function can be called more than once, with different engines, if we plan - * to use the context with them. The context backing objects and the ringbuffers - * (specially the ringbuffer backing objects) suck a lot of memory up, and that's why - * the creation is a deferred call: it's better to make sure first that we need to use - * a given ring with the context. - * - * Return: non-zero on error. - */ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { struct drm_i915_gem_object *ctx_obj; struct intel_context *ce = &ctx->engine[engine->id]; + struct i915_vma *vma; uint32_t context_size; - struct intel_ringbuffer *ringbuf; + struct intel_ring *ring; int ret; WARN_ON(ce->state); @@ -2519,30 +2144,34 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, return PTR_ERR(ctx_obj); } - ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size); - if (IS_ERR(ringbuf)) { - ret = PTR_ERR(ringbuf); + vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto error_deref_obj; + } + + ring = intel_engine_create_ring(engine, ctx->ring_size); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); goto error_deref_obj; } - ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf); + ret = populate_lr_context(ctx, ctx_obj, engine, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); - goto error_ringbuf; + goto error_ring_free; } - ce->ringbuf = ringbuf; - ce->state = ctx_obj; + ce->ring = ring; + ce->state = vma; ce->initialised = engine->init_context == NULL; return 0; -error_ringbuf: - intel_ringbuffer_free(ringbuf); +error_ring_free: + intel_ring_free(ring); error_deref_obj: - drm_gem_object_unreference(&ctx_obj->base); - ce->ringbuf = NULL; - ce->state = NULL; + i915_gem_object_put(ctx_obj); return ret; } @@ -2553,26 +2182,25 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, for_each_engine(engine, dev_priv) { struct intel_context *ce = &ctx->engine[engine->id]; - struct drm_i915_gem_object *ctx_obj = ce->state; void *vaddr; uint32_t *reg_state; - if (!ctx_obj) + if (!ce->state) continue; - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ce->state->obj, 
I915_MAP_WB); if (WARN_ON(IS_ERR(vaddr))) continue; reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ctx_obj->dirty = true; reg_state[CTX_RING_HEAD+1] = 0; reg_state[CTX_RING_TAIL+1] = 0; - i915_gem_object_unpin_map(ctx_obj); + ce->state->obj->dirty = true; + i915_gem_object_unpin_map(ce->state->obj); - ce->ringbuf->head = 0; - ce->ringbuf->tail = 0; + ce->ring->head = 0; + ce->ring->tail = 0; } } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 2b8255c19dcc..a52cf57dbd40 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -29,17 +29,17 @@ #define GEN8_LR_CONTEXT_ALIGN 4096 /* Execlists regs */ -#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230) -#define RING_EXECLIST_STATUS_LO(ring) _MMIO((ring)->mmio_base + 0x234) -#define RING_EXECLIST_STATUS_HI(ring) _MMIO((ring)->mmio_base + 0x234 + 4) -#define RING_CONTEXT_CONTROL(ring) _MMIO((ring)->mmio_base + 0x244) +#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230) +#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234) +#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4) +#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) -#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370) -#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8) -#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) -#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) +#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370) +#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8) +#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4) +#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0) /* The docs specify that the write pointer wraps around after 5h, "After status * is written out to the last available status QW at offset 5h, this pointer @@ -67,35 +67,10 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); void intel_logical_ring_stop(struct intel_engine_cs *engine); void intel_logical_ring_cleanup(struct intel_engine_cs *engine); -int intel_logical_rings_init(struct drm_device *dev); +int logical_render_ring_init(struct intel_engine_cs *engine); +int logical_xcs_ring_init(struct intel_engine_cs *engine); -int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); -/** - * intel_logical_ring_advance() - advance the ringbuffer tail - * @ringbuf: Ringbuffer to advance. - * - * The tail is only updated in our logical ringbuffer struct. - */ -static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf) -{ - ringbuf->tail &= ringbuf->size - 1; -} -/** - * intel_logical_ring_emit() - write a DWORD to the ringbuffer. - * @ringbuf: Ringbuffer to write to. - * @data: DWORD to write. 
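The inline helpers removed just below wrote one DWORD at the ring tail and let the advance step mask the tail back into range, relying on the ring size being a power of two. A self-contained sketch of that wrap-around, with an ordinary in-memory buffer standing in for the GTT mapping and space reservation left out:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define RING_SIZE 4096u		/* must be a power of two */

struct ring {
	uint8_t  buf[RING_SIZE];
	uint32_t tail;		/* byte offset of the next write */
};

static void ring_emit(struct ring *r, uint32_t data)
{
	memcpy(r->buf + r->tail, &data, sizeof(data));
	r->tail += 4;
}

static void ring_advance(struct ring *r)
{
	r->tail &= RING_SIZE - 1;	/* wrap; works because size is a power of two */
}

int main(void)
{
	struct ring r = { .tail = RING_SIZE - 4 };

	ring_emit(&r, 0xdeadbeef);	/* lands in the last slot */
	ring_advance(&r);
	assert(r.tail == 0);		/* tail wrapped back to the start */
	return 0;
}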
- */ -static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, - u32 data) -{ - iowrite32(data, ringbuf->virtual_start + ringbuf->tail); - ringbuf->tail += 4; -} -static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, - i915_reg_t reg) -{ - intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg)); -} +int intel_engines_init(struct drm_device *dev); /* Logical Ring Contexts */ @@ -120,10 +95,7 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, /* Execlists */ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists); -struct i915_execbuffer_params; -int intel_execlists_submission(struct i915_execbuffer_params *params, - struct drm_i915_gem_execbuffer2 *args, - struct list_head *vmas); +void intel_execlists_enable_submission(struct drm_i915_private *dev_priv); void intel_execlists_cancel_requests(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 49550470483e..52d6ed6f6966 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -48,6 +48,20 @@ struct intel_lvds_connector { struct notifier_block lid_notifier; }; +struct intel_lvds_pps { + /* 100us units */ + int t1_t2; + int t3; + int t4; + int t5; + int tx; + + int divider; + + int port; + bool powerdown_on_reset; +}; + struct intel_lvds_encoder { struct intel_encoder base; @@ -55,6 +69,9 @@ struct intel_lvds_encoder { i915_reg_t reg; u32 a3_power; + struct intel_lvds_pps init_pps; + u32 init_lvds_val; + struct intel_lvds_connector *attached_connector; }; @@ -136,6 +153,83 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } +static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_lvds_pps *pps) +{ + u32 val; + + pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET; + + val = I915_READ(PP_ON_DELAYS(0)); + pps->port = (val & PANEL_PORT_SELECT_MASK) >> + PANEL_PORT_SELECT_SHIFT; + pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >> + PANEL_POWER_UP_DELAY_SHIFT; + pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >> + PANEL_LIGHT_ON_DELAY_SHIFT; + + val = I915_READ(PP_OFF_DELAYS(0)); + pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >> + PANEL_POWER_DOWN_DELAY_SHIFT; + pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >> + PANEL_LIGHT_OFF_DELAY_SHIFT; + + val = I915_READ(PP_DIVISOR(0)); + pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >> + PP_REFERENCE_DIVIDER_SHIFT; + val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >> + PANEL_POWER_CYCLE_DELAY_SHIFT; + /* + * Remove the BSpec specified +1 (100ms) offset that accounts for a + * too short power-cycle delay due to the asynchronous programming of + * the register. 
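The conversion that follows is easy to misread: the register holds the power-cycle delay in 100 ms units with a +1 offset, while the driver keeps t4 in 100 µs units, so readout subtracts one and multiplies by 1000, and write-back divides (rounding up) and adds the one back. A standalone sketch of that round trip, intended only as an illustration of the arithmetic:

#include <assert.h>

/* register field (100 ms units, +1 offset) -> driver value (100 us units) */
static int pp_cycle_reg_to_t4(int reg)
{
	if (reg)
		reg--;			/* drop the BSpec +1 offset */
	return reg * 1000;		/* 100 ms -> 100 us units */
}

/* driver value (100 us units) -> register field (100 ms units, +1 offset) */
static int pp_cycle_t4_to_reg(int t4)
{
	return (t4 + 999) / 1000 + 1;	/* DIV_ROUND_UP(t4, 1000) + 1 */
}

int main(void)
{
	int reg = 6;				/* 500 ms programmed as 5 + 1 */
	int t4 = pp_cycle_reg_to_t4(reg);

	assert(t4 == 5000);			/* 500 ms expressed in 100 us units */
	assert(pp_cycle_t4_to_reg(t4) == reg);	/* round-trips back to 6 */
	return 0;
}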
+ */ + if (val) + val--; + /* Convert from 100ms to 100us units */ + pps->t4 = val * 1000; + + if (INTEL_INFO(dev_priv)->gen <= 4 && + pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { + DRM_DEBUG_KMS("Panel power timings uninitialized, " + "setting defaults\n"); + /* Set T2 to 40ms and T5 to 200ms in 100 usec units */ + pps->t1_t2 = 40 * 10; + pps->t5 = 200 * 10; + /* Set T3 to 35ms and Tx to 200ms in 100 usec units */ + pps->t3 = 35 * 10; + pps->tx = 200 * 10; + } + + DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d " + "divider %d port %d powerdown_on_reset %d\n", + pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx, + pps->divider, pps->port, pps->powerdown_on_reset); +} + +static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, + struct intel_lvds_pps *pps) +{ + u32 val; + + val = I915_READ(PP_CONTROL(0)); + WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); + if (pps->powerdown_on_reset) + val |= PANEL_POWER_RESET; + I915_WRITE(PP_CONTROL(0), val); + + I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) | + (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) | + (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT)); + I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) | + (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT)); + + val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT; + val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) << + PANEL_POWER_CYCLE_DELAY_SHIFT; + I915_WRITE(PP_DIVISOR(0), val); +} + static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); @@ -154,7 +248,9 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) assert_pll_disabled(dev_priv, pipe); } - temp = I915_READ(lvds_encoder->reg); + intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps); + + temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (HAS_PCH_CPT(dev)) { @@ -217,21 +313,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder) struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; struct drm_i915_private *dev_priv = to_i915(dev); - i915_reg_t ctl_reg, stat_reg; - - if (HAS_PCH_SPLIT(dev)) { - ctl_reg = PCH_PP_CONTROL; - stat_reg = PCH_PP_STATUS; - } else { - ctl_reg = PP_CONTROL; - stat_reg = PP_STATUS; - } I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000)) + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(intel_connector); @@ -242,18 +329,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder) struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(dev); - i915_reg_t ctl_reg, stat_reg; - - if (HAS_PCH_SPLIT(dev)) { - ctl_reg = PCH_PP_CONTROL; - stat_reg = PCH_PP_STATUS; - } else { - ctl_reg = PP_CONTROL; - stat_reg = PP_STATUS; - } - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); - if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000)) + I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON); + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000)) 
DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -900,17 +978,6 @@ void intel_lvds_init(struct drm_device *dev) int pipe; u8 pin; - /* - * Unlock registers and just leave them unlocked. Do this before - * checking quirk lists to avoid bogus WARNINGs. - */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_CONTROL, - I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); - } else if (INTEL_INFO(dev_priv)->gen < 5) { - I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); - } if (!intel_lvds_supported(dev)) return; @@ -943,18 +1010,6 @@ void intel_lvds_init(struct drm_device *dev) DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); } - /* Set the Panel Power On/Off timings if uninitialized. */ - if (INTEL_INFO(dev_priv)->gen < 5 && - I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) { - /* Set T2 to 40ms and T5 to 200ms */ - I915_WRITE(PP_ON_DELAYS, 0x019007d0); - - /* Set T3 to 35ms and Tx to 200ms */ - I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); - - DRM_DEBUG_KMS("Panel power timings uninitialized, setting defaults\n"); - } - lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); if (!lvds_encoder) return; @@ -1020,6 +1075,10 @@ void intel_lvds_init(struct drm_device *dev) dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_ASPECT); intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; + + intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); + lvds_encoder->init_lvds_val = lvds; + /* * LVDS discovery: * 1) check for EDID on DDC @@ -1054,17 +1113,6 @@ void intel_lvds_init(struct drm_device *dev) } lvds_connector->base.edid = edid; - if (IS_ERR_OR_NULL(edid)) { - /* Didn't get an EDID, so - * Set wide sync ranges so we get all modes - * handed to valid_mode for checking - */ - connector->display_info.min_vfreq = 0; - connector->display_info.max_vfreq = 200; - connector->display_info.min_hfreq = 0; - connector->display_info.max_hfreq = 200; - } - list_for_each_entry(scan, &connector->probed_modes, head) { if (scan->type & DRM_MODE_TYPE_PREFERRED) { DRM_DEBUG_KMS("using preferred mode from EDID: "); diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 927825f5b284..80bb9247ce66 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -97,7 +97,8 @@ struct drm_i915_mocs_table { * end. 
*/ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { - { /* 0x00000009 */ + [I915_MOCS_UNCACHED] = { + /* 0x00000009 */ .control_value = LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | @@ -106,7 +107,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { /* 0x0010 */ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), }, - { + [I915_MOCS_PTE] = { /* 0x00000038 */ .control_value = LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | @@ -115,7 +116,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { /* 0x0030 */ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), }, - { + [I915_MOCS_CACHED] = { /* 0x0000003b */ .control_value = LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | @@ -128,7 +129,7 @@ static const struct drm_i915_mocs_entry skylake_mocs_table[] = { /* NOTE: the LE_TGT_CACHE is not used on Broxton */ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { - { + [I915_MOCS_UNCACHED] = { /* 0x00000009 */ .control_value = LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | @@ -138,7 +139,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { /* 0x0010 */ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), }, - { + [I915_MOCS_PTE] = { /* 0x00000038 */ .control_value = LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | @@ -148,7 +149,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { /* 0x0030 */ .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), }, - { + [I915_MOCS_CACHED] = { /* 0x00000039 */ .control_value = LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LE_TC_LLC_ELLC) | @@ -203,9 +204,9 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, return result; } -static i915_reg_t mocs_register(enum intel_engine_id ring, int index) +static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) { - switch (ring) { + switch (engine_id) { case RCS: return GEN9_GFX_MOCS(index); case VCS: @@ -217,7 +218,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) case VCS2: return GEN9_MFX1_MOCS(index); default: - MISSING_CASE(ring); + MISSING_CASE(engine_id); return INVALID_MMIO_REG; } } @@ -275,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine) static int emit_mocs_control_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ring *ring = req->ring; enum intel_engine_id engine = req->engine->id; unsigned int index; int ret; @@ -287,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_logical_ring_emit(ringbuf, - MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); for (index = 0; index < table->size; index++) { - intel_logical_ring_emit_reg(ringbuf, - mocs_register(engine, index)); - intel_logical_ring_emit(ringbuf, - table->table[index].control_value); + intel_ring_emit_reg(ring, mocs_register(engine, index)); + intel_ring_emit(ring, table->table[index].control_value); } /* @@ -306,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req, * that value to all the used entries. 
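The loop just below tops up the remaining MOCS control registers with the index-0 (uncached) entry so every slot the hardware exposes holds something defined. A small sketch of that "fill the tail with entry zero" idea, using a plain array in place of the MI_LOAD_REGISTER_IMM writes and an assumed table size:

#include <stdio.h>

#define NUM_MOCS_ENTRIES 62	/* assumed size for the sketch */

int main(void)
{
	const unsigned int defined[] = { 0x00000009, 0x00000038, 0x0000003b };
	unsigned int regs[NUM_MOCS_ENTRIES];
	unsigned int i;

	for (i = 0; i < sizeof(defined) / sizeof(defined[0]); i++)
		regs[i] = defined[i];

	/* pad the unused entries with entry 0 (the uncached setting) */
	for (; i < NUM_MOCS_ENTRIES; i++)
		regs[i] = defined[0];

	printf("entry 3 -> %#x, entry 61 -> %#x\n", regs[3], regs[61]);
	return 0;
}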
*/ for (; index < GEN9_NUM_MOCS_ENTRIES; index++) { - intel_logical_ring_emit_reg(ringbuf, - mocs_register(engine, index)); - intel_logical_ring_emit(ringbuf, - table->table[0].control_value); + intel_ring_emit_reg(ring, mocs_register(engine, index)); + intel_ring_emit(ring, table->table[0].control_value); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -340,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table) { - struct intel_ringbuffer *ringbuf = req->ringbuf; + struct intel_ring *ring = req->ring; unsigned int i; int ret; @@ -351,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, if (ret) return ret; - intel_logical_ring_emit(ringbuf, + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2)); for (i = 0; i < table->size/2; i++) { - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, - l3cc_combine(table, 2*i, 2*i+1)); + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1)); } if (table->size & 0x01) { /* Odd table size - 1 left over */ - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0)); + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ring, l3cc_combine(table, 2*i, 0)); i++; } @@ -373,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, * they are reserved by the hardware. */ for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) { - intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i)); - intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0)); + intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i)); + intel_ring_emit(ring, l3cc_combine(table, 0, 0)); } - intel_logical_ring_emit(ringbuf, MI_NOOP); - intel_logical_ring_advance(ringbuf); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h index 4640299e04ec..a8bd9f7bfece 100644 --- a/drivers/gpu/drm/i915/intel_mocs.h +++ b/drivers/gpu/drm/i915/intel_mocs.h @@ -54,6 +54,6 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); void intel_mocs_init_l3cc_table(struct drm_device *dev); -int intel_mocs_init_engine(struct intel_engine_cs *ring); +int intel_mocs_init_engine(struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index f2584d0a01ab..951e834dd274 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -25,7 +25,6 @@ #include <linux/slab.h> #include <linux/i2c.h> -#include <linux/fb.h> #include <drm/drm_edid.h> #include <drm/drmP.h> #include "intel_drv.h" diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 3212d8806b5a..a24bc8c7889f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -30,6 +30,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_drv.h" +#include "intel_frontbuffer.h" /* Limits for overlay size. 
According to intel doc, the real limits are: * Y width: 4095, UV width (planar): 2047, Y height: 2047, @@ -170,8 +171,8 @@ struct overlay_registers { struct intel_overlay { struct drm_i915_private *i915; struct intel_crtc *crtc; - struct drm_i915_gem_object *vid_bo; - struct drm_i915_gem_object *old_vid_bo; + struct i915_vma *vma; + struct i915_vma *old_vma; bool active; bool pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ @@ -183,8 +184,7 @@ struct intel_overlay { u32 flip_addr; struct drm_i915_gem_object *reg_bo; /* flip handling */ - struct drm_i915_gem_request *last_flip_req; - void (*flip_tail)(struct intel_overlay *); + struct i915_gem_active last_flip; }; static struct overlay_registers __iomem * @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_wc(dev_priv->ggtt.mappable, + regs = io_mapping_map_wc(&dev_priv->ggtt.mappable, overlay->flip_addr, PAGE_SIZE); @@ -210,37 +210,46 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay, io_mapping_unmap(regs); } -static int intel_overlay_do_wait_request(struct intel_overlay *overlay, +static void intel_overlay_submit_request(struct intel_overlay *overlay, struct drm_i915_gem_request *req, - void (*tail)(struct intel_overlay *)) + i915_gem_retire_fn retire) { - int ret; - - WARN_ON(overlay->last_flip_req); - i915_gem_request_assign(&overlay->last_flip_req, req); + GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, + &overlay->i915->drm.struct_mutex)); + overlay->last_flip.retire = retire; + i915_gem_active_set(&overlay->last_flip, req); i915_add_request(req); +} - overlay->flip_tail = tail; - ret = i915_wait_request(overlay->last_flip_req); - if (ret) - return ret; +static int intel_overlay_do_wait_request(struct intel_overlay *overlay, + struct drm_i915_gem_request *req, + i915_gem_retire_fn retire) +{ + intel_overlay_submit_request(overlay, req, retire); + return i915_gem_active_retire(&overlay->last_flip, + &overlay->i915->drm.struct_mutex); +} - i915_gem_request_assign(&overlay->last_flip_req, NULL); - return 0; +static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay) +{ + struct drm_i915_private *dev_priv = overlay->i915; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + + return i915_gem_request_alloc(engine, dev_priv->kernel_context); } /* overlay needs to be disable in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ring *ring; int ret; WARN_ON(overlay->active); WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); - req = i915_gem_request_alloc(engine, NULL); + req = alloc_request(overlay); if (IS_ERR(req)) return PTR_ERR(req); @@ -252,11 +261,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) overlay->active = true; - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON); - intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE); - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + ring = req->ring; + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | 
MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return intel_overlay_do_wait_request(overlay, req, NULL); } @@ -266,8 +276,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ring *ring; u32 flip_addr = overlay->flip_addr; u32 tmp; int ret; @@ -282,7 +292,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - req = i915_gem_request_alloc(engine, NULL); + req = alloc_request(overlay); if (IS_ERR(req)) return PTR_ERR(req); @@ -292,38 +302,48 @@ static int intel_overlay_continue(struct intel_overlay *overlay, return ret; } - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(engine, flip_addr); - intel_ring_advance(engine); + ring = req->ring; + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_advance(ring); - WARN_ON(overlay->last_flip_req); - i915_gem_request_assign(&overlay->last_flip_req, req); - i915_add_request(req); + intel_overlay_submit_request(overlay, req, NULL); return 0; } -static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) +static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, + struct drm_i915_gem_request *req) { - struct drm_i915_gem_object *obj = overlay->old_vid_bo; + struct intel_overlay *overlay = + container_of(active, typeof(*overlay), last_flip); + struct i915_vma *vma; - i915_gem_object_ggtt_unpin(obj); - drm_gem_object_unreference(&obj->base); + vma = fetch_and_zero(&overlay->old_vma); + if (WARN_ON(!vma)) + return; - overlay->old_vid_bo = NULL; + i915_gem_track_fb(vma->obj, NULL, + INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); + + i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); } -static void intel_overlay_off_tail(struct intel_overlay *overlay) +static void intel_overlay_off_tail(struct i915_gem_active *active, + struct drm_i915_gem_request *req) { - struct drm_i915_gem_object *obj = overlay->vid_bo; + struct intel_overlay *overlay = + container_of(active, typeof(*overlay), last_flip); + struct i915_vma *vma; /* never have the overlay hw on without showing a frame */ - if (WARN_ON(!obj)) + vma = fetch_and_zero(&overlay->vma); + if (WARN_ON(!vma)) return; - i915_gem_object_ggtt_unpin(obj); - drm_gem_object_unreference(&obj->base); - overlay->vid_bo = NULL; + i915_gem_object_unpin_from_display_plane(vma); + i915_vma_put(vma); overlay->crtc->overlay = NULL; overlay->crtc = NULL; @@ -334,8 +354,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) static int intel_overlay_off(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; + struct intel_ring *ring; u32 flip_addr = overlay->flip_addr; int ret; @@ -347,7 +367,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) * of the hw. 
Do it in both cases */ flip_addr |= OFC_UPDATE; - req = i915_gem_request_alloc(engine, NULL); + req = alloc_request(overlay); if (IS_ERR(req)) return PTR_ERR(req); @@ -357,46 +377,36 @@ static int intel_overlay_off(struct intel_overlay *overlay) return ret; } + ring = req->ring; /* wait for overlay to go idle */ - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(engine, flip_addr); - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. */ - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); } else { - intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - intel_ring_emit(engine, flip_addr); - intel_ring_emit(engine, + intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(ring, flip_addr); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); } - intel_ring_advance(engine); + intel_ring_advance(ring); - return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail); + return intel_overlay_do_wait_request(overlay, req, + intel_overlay_off_tail); } /* recover from an interruption due to a signal * We have to be careful not to repeat work forever an make forward progess. */ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) { - int ret; - - if (overlay->last_flip_req == NULL) - return 0; - - ret = i915_wait_request(overlay->last_flip_req); - if (ret) - return ret; - - if (overlay->flip_tail) - overlay->flip_tail(overlay); - - i915_gem_request_assign(&overlay->last_flip_req, NULL); - return 0; + return i915_gem_active_retire(&overlay->last_flip, + &overlay->i915->drm.struct_mutex); } /* Wait for pending overlay flip and release old frame. @@ -406,7 +416,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); @@ -414,14 +423,15 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) /* Only wait if there is actually an old frame to release to * guarantee forward progress. 
*/ - if (!overlay->old_vid_bo) + if (!overlay->old_vma) return 0; if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { /* synchronous slowpath */ struct drm_i915_gem_request *req; + struct intel_ring *ring; - req = i915_gem_request_alloc(engine, NULL); + req = alloc_request(overlay); if (IS_ERR(req)) return PTR_ERR(req); @@ -431,22 +441,19 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return ret; } - intel_ring_emit(engine, + ring = req->ring; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_overlay_do_wait_request(overlay, req, intel_overlay_release_old_vid_tail); if (ret) return ret; - } + } else + intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL); - intel_overlay_release_old_vid_tail(overlay); - - - i915_gem_track_fb(overlay->old_vid_bo, NULL, - INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); return 0; } @@ -459,7 +466,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv) intel_overlay_release_old_vid(overlay); - overlay->last_flip_req = NULL; overlay->old_xscale = 0; overlay->old_yscale = 0; overlay->crtc = NULL; @@ -740,6 +746,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; + struct i915_vma *vma; lockdep_assert_held(&dev_priv->drm.struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); @@ -748,12 +755,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin_to_display_plane(new_bo, 0, + vma = i915_gem_object_pin_to_display_plane(new_bo, 0, &i915_ggtt_view_normal); - if (ret != 0) - return ret; + if (IS_ERR(vma)) + return PTR_ERR(vma); - ret = i915_gem_object_put_fence(new_bo); + ret = i915_vma_put_fence(vma); if (ret) goto out_unpin; @@ -794,7 +801,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, swidth = params->src_w; swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); + iowrite32(i915_ggtt_offset(vma) + params->offset_Y, ®s->OBUF_0Y); ostride = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -808,8 +815,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, ®s->OBUF_0U); - iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, ®s->OBUF_0V); + iowrite32(i915_ggtt_offset(vma) + params->offset_U, + ®s->OBUF_0U); + iowrite32(i915_ggtt_offset(vma) + params->offset_V, + ®s->OBUF_0V); ostride |= params->stride_UV << 16; } @@ -830,19 +839,18 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret) goto out_unpin; - i915_gem_track_fb(overlay->vid_bo, new_bo, + i915_gem_track_fb(overlay->vma->obj, new_bo, INTEL_FRONTBUFFER_OVERLAY(pipe)); - overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = new_bo; + overlay->old_vma = overlay->vma; + overlay->vma = vma; - intel_frontbuffer_flip(&dev_priv->drm, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; 
out_unpin: - i915_gem_object_ggtt_unpin(new_bo); + i915_gem_object_unpin_from_display_plane(vma); return ret; } @@ -870,12 +878,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) iowrite32(0, ®s->OCMD); intel_overlay_unmap_regs(overlay, regs); - ret = intel_overlay_off(overlay); - if (ret != 0) - return ret; - - intel_overlay_off_tail(overlay); - return 0; + return intel_overlay_off(overlay); } static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, @@ -1122,9 +1125,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, } crtc = to_intel_crtc(drmmode_crtc); - new_bo = to_intel_bo(drm_gem_object_lookup(file_priv, - put_image_rec->bo_handle)); - if (&new_bo->base == NULL) { + new_bo = i915_gem_object_lookup(file_priv, put_image_rec->bo_handle); + if (!new_bo) { ret = -ENOENT; goto out_free; } @@ -1132,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); mutex_lock(&dev->struct_mutex); - if (new_bo->tiling_mode) { + if (i915_gem_object_is_tiled(new_bo)) { DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n"); ret = -EINVAL; goto out_unlock; @@ -1220,7 +1222,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); drm_modeset_unlock_all(dev); - drm_gem_object_unreference_unlocked(&new_bo->base); + i915_gem_object_put_unlocked(new_bo); out_free: kfree(params); @@ -1371,6 +1373,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; + struct i915_vma *vma = NULL; int ret; if (!HAS_OVERLAY(dev_priv)) @@ -1404,12 +1407,14 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) } overlay->flip_addr = reg_bo->phys_handle->busaddr; } else { - ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); - if (ret) { + vma = i915_gem_object_ggtt_pin(reg_bo, NULL, + 0, PAGE_SIZE, PIN_MAPPABLE); + if (IS_ERR(vma)) { DRM_ERROR("failed to pin overlay register bo\n"); + ret = PTR_ERR(vma); goto out_free_bo; } - overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo); + overlay->flip_addr = i915_ggtt_offset(vma); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1441,10 +1446,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv) return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) - i915_gem_object_ggtt_unpin(reg_bo); + if (vma) + i915_vma_unpin(vma); out_free_bo: - drm_gem_object_unreference(®_bo->base); + i915_gem_object_put(reg_bo); out_free: mutex_unlock(&dev_priv->drm.struct_mutex); kfree(overlay); @@ -1461,7 +1466,7 @@ void intel_cleanup_overlay(struct drm_i915_private *dev_priv) * hardware should be off already */ WARN_ON(dev_priv->overlay->active); - drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base); + i915_gem_object_put_unlocked(dev_priv->overlay->reg_bo); kfree(dev_priv->overlay); } @@ -1484,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, + regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable, overlay->flip_addr); return regs; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f4f3fcc8b3be..8a6751e14ab9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -340,6 +340,11 @@ void 
intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) I915_WRITE(FW_BLC_SELF, val); POSTING_READ(FW_BLC_SELF); } else if (IS_I915GM(dev)) { + /* + * FIXME can't find a bit like this for 915G, and + * and yet it does have the related watermark in + * FW_BLC_SELF. What's going on? + */ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : _MASKED_BIT_DISABLE(INSTPM_SELF_EN); I915_WRITE(INSTPM, val); @@ -960,7 +965,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; - if (!state->visible) + if (!state->base.visible) return 0; cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0); @@ -1002,7 +1007,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) if (plane->base.type == DRM_PLANE_TYPE_CURSOR) continue; - if (state->visible) { + if (state->base.visible) { wm_state->num_active_planes++; total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0); } @@ -1018,7 +1023,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) continue; } - if (!state->visible) { + if (!state->base.visible) { plane->wm.fifo_size = 0; continue; } @@ -1118,7 +1123,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc) struct intel_plane_state *state = to_intel_plane_state(plane->base.state); - if (!state->visible) + if (!state->base.visible) continue; /* normal watermarks */ @@ -1580,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) obj = intel_fb_obj(enabled->primary->state->fb); /* self-refresh seems busted with untiled */ - if (obj->tiling_mode == I915_TILING_NONE) + if (!i915_gem_object_is_tiled(obj)) enabled = NULL; } @@ -1604,6 +1609,9 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) unsigned long line_time_us; int entries; + if (IS_I915GM(dev) || IS_I945GM(dev)) + cpp = 4; + line_time_us = max(htotal * 1000 / clock, 1); /* Use ns/us then divide to preserve precision */ @@ -1618,7 +1626,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) if (IS_I945G(dev) || IS_I945GM(dev)) I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else if (IS_I915GM(dev)) + else I915_WRITE(FW_BLC_SELF, srwm & 0x3f); } @@ -1767,7 +1775,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; - if (!cstate->base.active || !pstate->visible) + if (!cstate->base.active || !pstate->base.visible) return 0; method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); @@ -1777,7 +1785,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->dst), + drm_rect_width(&pstate->base.dst), cpp, mem_value); return min(method1, method2); @@ -1795,13 +1803,13 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; - if (!cstate->base.active || !pstate->visible) + if (!cstate->base.active || !pstate->base.visible) return 0; method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->dst), + drm_rect_width(&pstate->base.dst), cpp, mem_value); return min(method1, method2); } @@ -1820,7 +1828,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, * this is necessary to avoid 
flickering. */ int cpp = 4; - int width = pstate->visible ? pstate->base.crtc_w : 64; + int width = pstate->base.visible ? pstate->base.crtc_w : 64; if (!cstate->base.active) return 0; @@ -1838,10 +1846,10 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, int cpp = pstate->base.fb ? drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; - if (!cstate->base.active || !pstate->visible) + if (!cstate->base.active || !pstate->base.visible) return 0; - return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); } static unsigned int ilk_display_fifo_size(const struct drm_device *dev) @@ -2358,10 +2366,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) pipe_wm->pipe_enabled = cstate->base.active; if (sprstate) { - pipe_wm->sprites_enabled = sprstate->visible; - pipe_wm->sprites_scaled = sprstate->visible && - (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || - drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); + pipe_wm->sprites_enabled = sprstate->base.visible; + pipe_wm->sprites_scaled = sprstate->base.visible && + (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 || + drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16); } usable_level = max_level; @@ -2996,14 +3004,14 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate) uint32_t downscale_h, downscale_w; uint32_t src_w, src_h, dst_w, dst_h; - if (WARN_ON(!pstate->visible)) + if (WARN_ON(!pstate->base.visible)) return DRM_PLANE_HELPER_NO_SCALING; /* n.b., src is 16.16 fixed point, dst is whole integer */ - src_w = drm_rect_width(&pstate->src); - src_h = drm_rect_height(&pstate->src); - dst_w = drm_rect_width(&pstate->dst); - dst_h = drm_rect_height(&pstate->dst); + src_w = drm_rect_width(&pstate->base.src); + src_h = drm_rect_height(&pstate->base.src); + dst_w = drm_rect_width(&pstate->base.dst); + dst_h = drm_rect_height(&pstate->base.dst); if (intel_rotation_90_or_270(pstate->base.rotation)) swap(dst_w, dst_h); @@ -3025,15 +3033,15 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, uint32_t width = 0, height = 0; unsigned format = fb ? 
fb->pixel_format : DRM_FORMAT_XRGB8888; - if (!intel_pstate->visible) + if (!intel_pstate->base.visible) return 0; if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) return 0; if (y && format != DRM_FORMAT_NV12) return 0; - width = drm_rect_width(&intel_pstate->src) >> 16; - height = drm_rect_height(&intel_pstate->src) >> 16; + width = drm_rect_width(&intel_pstate->base.src) >> 16; + height = drm_rect_height(&intel_pstate->base.src) >> 16; if (intel_rotation_90_or_270(pstate->rotation)) swap(width, height); @@ -3107,8 +3115,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; } - WARN_ON(cstate->plane_mask && total_data_rate == 0); - return total_data_rate; } @@ -3134,8 +3140,8 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED) return 8; - src_w = drm_rect_width(&intel_pstate->src) >> 16; - src_h = drm_rect_height(&intel_pstate->src) >> 16; + src_w = drm_rect_width(&intel_pstate->base.src) >> 16; + src_h = drm_rect_height(&intel_pstate->base.src) >> 16; if (intel_rotation_90_or_270(pstate->rotation)) swap(src_w, src_h); @@ -3226,7 +3232,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, if (intel_plane->pipe != pipe) continue; - if (!to_intel_plane_state(pstate)->visible) { + if (!to_intel_plane_state(pstate)->base.visible) { minimum[id] = 0; y_minimum[id] = 0; continue; @@ -3344,6 +3350,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, plane_bytes_per_line *= 4; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); plane_blocks_per_line /= 4; + } else if (tiling == DRM_FORMAT_MOD_NONE) { + plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; } else { plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); } @@ -3363,7 +3371,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst uint64_t pixel_rate; /* Shouldn't reach here on disabled planes... */ - if (WARN_ON(!pstate->visible)) + if (WARN_ON(!pstate->base.visible)) return 0; /* @@ -3399,13 +3407,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t width = 0, height = 0; uint32_t plane_pixel_rate; - if (latency == 0 || !cstate->base.active || !intel_pstate->visible) { + if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { *enabled = false; return 0; } - width = drm_rect_width(&intel_pstate->src) >> 16; - height = drm_rect_height(&intel_pstate->src) >> 16; + width = drm_rect_width(&intel_pstate->base.src) >> 16; + height = drm_rect_height(&intel_pstate->base.src) >> 16; if (intel_rotation_90_or_270(pstate->rotation)) swap(width, height); @@ -3910,9 +3918,24 @@ skl_compute_ddb(struct drm_atomic_state *state) * pretend that all pipes switched active status so that we'll * ensure a full DDB recompute. */ - if (dev_priv->wm.distrust_bios_wm) + if (dev_priv->wm.distrust_bios_wm) { + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + state->acquire_ctx); + if (ret) + return ret; + intel_state->active_pipe_changes = ~0; + /* + * We usually only initialize intel_state->active_crtcs if we + * we're doing a modeset; make sure this field is always + * initialized during the sanitization process that happens + * on the first commit too. 
+ */ + if (!intel_state->modeset) + intel_state->active_crtcs = dev_priv->active_crtcs; + } + /* * If the modeset changes which CRTC's are active, we need to * recompute the DDB allocation for *all* active pipes, even @@ -4892,7 +4915,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) else gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMINTRMSK, + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); } mutex_unlock(&dev_priv->rps.hw_lock); @@ -4911,7 +4935,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, */ if (!(dev_priv->gt.awake && dev_priv->rps.enabled && - dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) + dev_priv->rps.cur_freq < dev_priv->rps.boost_freq)) return; /* Force a RPS boost (and don't count it against the client) if @@ -5102,35 +5126,31 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { - uint32_t rp_state_cap; - u32 ddcc_status = 0; - int ret; - /* All of these values are in units of 50MHz */ - dev_priv->rps.cur_freq = 0; + /* static values from HW: RP0 > RP1 > RPn (min_freq) */ if (IS_BROXTON(dev_priv)) { - rp_state_cap = I915_READ(BXT_RP_STATE_CAP); + u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; } else { - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; } - /* hw_max = RP0 until we check for overclocking */ - dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; + dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { - ret = sandybridge_pcode_read(dev_priv, - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status); - if (0 == ret) + u32 ddcc_status = 0; + + if (sandybridge_pcode_read(dev_priv, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status) == 0) dev_priv->rps.efficient_freq = clamp_t(u8, ((ddcc_status >> 8) & 0xff), @@ -5140,29 +5160,26 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is - the natural hardware unit for SKL */ + * the natural hardware unit for SKL + */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; } +} - dev_priv->rps.idle_freq = dev_priv->rps.min_freq; +static void reset_rps(struct drm_i915_private *dev_priv, + void (*set)(struct drm_i915_private *, u8)) +{ + u8 freq = dev_priv->rps.cur_freq; - /* Preserve min/max settings in case of re-init */ - if (dev_priv->rps.max_freq_softlimit == 0) - dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + /* force a reset */ + dev_priv->rps.power = -1; + dev_priv->rps.cur_freq = -1; - if (dev_priv->rps.min_freq_softlimit == 0) { - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - dev_priv->rps.min_freq_softlimit = - max_t(int, dev_priv->rps.efficient_freq, - 
intel_freq_opcode(dev_priv, 450)); - else - dev_priv->rps.min_freq_softlimit = - dev_priv->rps.min_freq; - } + set(dev_priv, freq); } /* See the Gen9_GT_PM_Programming_Guide doc for the below */ @@ -5170,8 +5187,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) { intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - gen6_init_rps_frequencies(dev_priv); - /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { /* @@ -5201,8 +5216,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) /* Leaning on the below call to gen6_set_rps to program/setup the * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ - dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); + reset_rps(dev_priv, gen6_set_rps); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5289,9 +5303,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) /* 2a: Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); - /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev_priv); - /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ @@ -5348,8 +5359,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) /* 6: Ring frequency + overclocking (our driver does this later */ - dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); + reset_rps(dev_priv, gen6_set_rps); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5357,7 +5367,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) static void gen6_enable_rps(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; + u32 rc6vids, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; int ret; @@ -5381,9 +5391,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev_priv); - /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -5434,16 +5441,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) if (ret) DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); - ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); - if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ - DRM_DEBUG_DRIVER("Overclocking supported. 
Max: %dMHz, Overclock max: %dMHz\n", - (dev_priv->rps.max_freq_softlimit & 0xff) * 50, - (pcu_mbox & 0xff) * 50); - dev_priv->rps.max_freq = pcu_mbox & 0xff; - } - - dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); + reset_rps(dev_priv, gen6_set_rps); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); @@ -5462,7 +5460,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) +static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { int min_freq = 15; unsigned int gpu_freq; @@ -5546,16 +5544,6 @@ static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) } } -void gen6_update_ring_freq(struct drm_i915_private *dev_priv) -{ - if (!HAS_CORE_RING_FREQ(dev_priv)) - return; - - mutex_lock(&dev_priv->rps.hw_lock); - __gen6_update_ring_freq(dev_priv); - mutex_unlock(&dev_priv->rps.hw_lock); -} - static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) { u32 val, rp0; @@ -5700,8 +5688,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev_priv->drm.struct_mutex); - pcbr = I915_READ(VLV_PCBR); if (pcbr) { /* BIOS set it up already, grab the pre-alloc'd space */ @@ -5737,7 +5723,6 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev_priv->drm.struct_mutex); } static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) @@ -5745,7 +5730,7 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) if (WARN_ON(!dev_priv->vlv_pctx)) return; - drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base); + i915_gem_object_put_unlocked(dev_priv->vlv_pctx); dev_priv->vlv_pctx = NULL; } @@ -5768,8 +5753,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) vlv_init_gpll_ref_freq(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); switch ((val >> 6) & 3) { case 0: @@ -5805,17 +5788,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), dev_priv->rps.min_freq); - - dev_priv->rps.idle_freq = dev_priv->rps.min_freq; - - /* Preserve min/max settings in case of re-init */ - if (dev_priv->rps.max_freq_softlimit == 0) - dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; - - if (dev_priv->rps.min_freq_softlimit == 0) - dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; - - mutex_unlock(&dev_priv->rps.hw_lock); } static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) @@ -5826,8 +5798,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) vlv_init_gpll_ref_freq(dev_priv); - mutex_lock(&dev_priv->rps.hw_lock); - mutex_lock(&dev_priv->sb_lock); val = vlv_cck_read(dev_priv, CCK_FUSE_REG); mutex_unlock(&dev_priv->sb_lock); @@ -5869,17 +5839,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) dev_priv->rps.rp1_freq | dev_priv->rps.min_freq) & 1, "Odd GPU freq values\n"); - - dev_priv->rps.idle_freq = dev_priv->rps.min_freq; - - /* Preserve min/max settings in case of re-init */ - if (dev_priv->rps.max_freq_softlimit == 0) - 
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; - - if (dev_priv->rps.min_freq_softlimit == 0) - dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; - - mutex_unlock(&dev_priv->rps.hw_lock); } static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) @@ -5970,16 +5929,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - dev_priv->rps.cur_freq = (val >> 8) & 0xff; - DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), - dev_priv->rps.cur_freq); - - DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), - dev_priv->rps.idle_freq); - - valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); + reset_rps(dev_priv, valleyview_set_rps); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -6059,16 +6009,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - dev_priv->rps.cur_freq = (val >> 8) & 0xff; - DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), - dev_priv->rps.cur_freq); - - DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), - dev_priv->rps.idle_freq); - - valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); + reset_rps(dev_priv, valleyview_set_rps); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -6397,19 +6338,11 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); */ bool i915_gpu_busy(void) { - struct drm_i915_private *dev_priv; - struct intel_engine_cs *engine; bool ret = false; spin_lock_irq(&mchdev_lock); - if (!i915_mch_dev) - goto out_unlock; - dev_priv = i915_mch_dev; - - for_each_engine(engine, dev_priv) - ret |= !list_empty(&engine->request_list); - -out_unlock: + if (i915_mch_dev) + ret = i915_mch_dev->gt.awake; spin_unlock_irq(&mchdev_lock); return ret; @@ -6565,30 +6498,62 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) intel_runtime_pm_get(dev_priv); } + mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->rps.hw_lock); + + /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) cherryview_init_gt_powersave(dev_priv); else if (IS_VALLEYVIEW(dev_priv)) valleyview_init_gt_powersave(dev_priv); + else if (INTEL_GEN(dev_priv) >= 6) + gen6_init_rps_frequencies(dev_priv); + + /* Derive initial user preferences/limits from the hardware limits */ + dev_priv->rps.idle_freq = dev_priv->rps.min_freq; + dev_priv->rps.cur_freq = dev_priv->rps.idle_freq; + + dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; + dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + dev_priv->rps.min_freq_softlimit = + max_t(int, + dev_priv->rps.efficient_freq, + intel_freq_opcode(dev_priv, 450)); + + /* After setting max-softlimit, find the overclock max freq */ + if (IS_GEN6(dev_priv) || + IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { + u32 params = 0; + + sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms); + if (params & BIT(31)) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", + (dev_priv->rps.max_freq & 0xff) * 50, + (params & 0xff) * 50); + dev_priv->rps.max_freq = params & 0xff; + } + } + + /* Finally allow us to 
boost to max by default */ + dev_priv->rps.boost_freq = dev_priv->rps.max_freq; + + mutex_unlock(&dev_priv->rps.hw_lock); + mutex_unlock(&dev_priv->drm.struct_mutex); + + intel_autoenable_gt_powersave(dev_priv); } void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - if (IS_CHERRYVIEW(dev_priv)) - return; - else if (IS_VALLEYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } -static void gen6_suspend_rps(struct drm_i915_private *dev_priv) -{ - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - - gen6_disable_rps_interrupts(dev_priv); -} - /** * intel_suspend_gt_powersave - suspend PM work and helper threads * @dev_priv: i915 device @@ -6602,60 +6567,76 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 6) return; - gen6_suspend_rps(dev_priv); + if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) + intel_runtime_pm_put(dev_priv); + + /* gen6_rps_idle() will be called later to disable interrupts */ +} + +void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) +{ + dev_priv->rps.enabled = true; /* force disabling */ + intel_disable_gt_powersave(dev_priv); - /* Force GPU to min freq during suspend */ - gen6_rps_idle(dev_priv); + gen6_reset_rps_interrupts(dev_priv); } void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { - if (IS_IRONLAKE_M(dev_priv)) { - ironlake_disable_drps(dev_priv); - } else if (INTEL_INFO(dev_priv)->gen >= 6) { - intel_suspend_gt_powersave(dev_priv); + if (!READ_ONCE(dev_priv->rps.enabled)) + return; - mutex_lock(&dev_priv->rps.hw_lock); - if (INTEL_INFO(dev_priv)->gen >= 9) { - gen9_disable_rc6(dev_priv); - gen9_disable_rps(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) - cherryview_disable_rps(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_disable_rps(dev_priv); - else - gen6_disable_rps(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); - dev_priv->rps.enabled = false; - mutex_unlock(&dev_priv->rps.hw_lock); + if (INTEL_GEN(dev_priv) >= 9) { + gen9_disable_rc6(dev_priv); + gen9_disable_rps(dev_priv); + } else if (IS_CHERRYVIEW(dev_priv)) { + cherryview_disable_rps(dev_priv); + } else if (IS_VALLEYVIEW(dev_priv)) { + valleyview_disable_rps(dev_priv); + } else if (INTEL_GEN(dev_priv) >= 6) { + gen6_disable_rps(dev_priv); + } else if (IS_IRONLAKE_M(dev_priv)) { + ironlake_disable_drps(dev_priv); } + + dev_priv->rps.enabled = false; + mutex_unlock(&dev_priv->rps.hw_lock); } -static void intel_gen6_powersave_work(struct work_struct *work) +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - rps.delayed_resume_work.work); + /* We shouldn't be disabling as we submit, so this should be less + * racy than it appears! 
+ */ + if (READ_ONCE(dev_priv->rps.enabled)) + return; - mutex_lock(&dev_priv->rps.hw_lock); + /* Powersaving is controlled by the host when inside a VM */ + if (intel_vgpu_active(dev_priv)) + return; - gen6_reset_rps_interrupts(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_CHERRYVIEW(dev_priv)) { cherryview_enable_rps(dev_priv); } else if (IS_VALLEYVIEW(dev_priv)) { valleyview_enable_rps(dev_priv); - } else if (INTEL_INFO(dev_priv)->gen >= 9) { + } else if (INTEL_GEN(dev_priv) >= 9) { gen9_enable_rc6(dev_priv); gen9_enable_rps(dev_priv); if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - __gen6_update_ring_freq(dev_priv); + gen6_update_ring_freq(dev_priv); } else if (IS_BROADWELL(dev_priv)) { gen8_enable_rps(dev_priv); - __gen6_update_ring_freq(dev_priv); - } else { + gen6_update_ring_freq(dev_priv); + } else if (INTEL_GEN(dev_priv) >= 6) { gen6_enable_rps(dev_priv); - __gen6_update_ring_freq(dev_priv); + gen6_update_ring_freq(dev_priv); + } else if (IS_IRONLAKE_M(dev_priv)) { + ironlake_enable_drps(dev_priv); + intel_init_emon(dev_priv); } WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); @@ -6665,18 +6646,47 @@ static void intel_gen6_powersave_work(struct work_struct *work) WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); dev_priv->rps.enabled = true; + mutex_unlock(&dev_priv->rps.hw_lock); +} - gen6_enable_rps_interrupts(dev_priv); +static void __intel_autoenable_gt_powersave(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), rps.autoenable_work.work); + struct intel_engine_cs *rcs; + struct drm_i915_gem_request *req; - mutex_unlock(&dev_priv->rps.hw_lock); + if (READ_ONCE(dev_priv->rps.enabled)) + goto out; + + rcs = &dev_priv->engine[RCS]; + if (rcs->last_context) + goto out; + if (!rcs->init_context) + goto out; + + mutex_lock(&dev_priv->drm.struct_mutex); + + req = i915_gem_request_alloc(rcs, dev_priv->kernel_context); + if (IS_ERR(req)) + goto unlock; + + if (!i915.enable_execlists && i915_switch_context(req) == 0) + rcs->init_context(req); + + /* Mark the device busy, calling intel_enable_gt_powersave() */ + i915_add_request_no_flush(req); + +unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); +out: intel_runtime_pm_put(dev_priv); } -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) +void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) { - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev_priv)) + if (READ_ONCE(dev_priv->rps.enabled)) return; if (IS_IRONLAKE_M(dev_priv)) { @@ -6697,21 +6707,13 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) * paths, so the _noresume version is enough (and in case of * runtime resume it's necessary). 
*/ - if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work, - round_jiffies_up_relative(HZ))) + if (queue_delayed_work(dev_priv->wq, + &dev_priv->rps.autoenable_work, + round_jiffies_up_relative(HZ))) intel_runtime_pm_get_noresume(dev_priv); } } -void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) -{ - if (INTEL_INFO(dev_priv)->gen < 6) - return; - - gen6_suspend_rps(dev_priv); - dev_priv->rps.enabled = false; -} - static void ibx_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -7786,7 +7788,7 @@ static void __intel_rps_boost_work(struct work_struct *work) if (!i915_gem_request_completed(req)) gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); - i915_gem_request_unreference(req); + i915_gem_request_put(req); kfree(boost); } @@ -7804,8 +7806,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) if (boost == NULL) return; - i915_gem_request_reference(req); - boost->req = req; + boost->req = i915_gem_request_get(req); INIT_WORK(&boost->work, __intel_rps_boost_work); queue_work(req->i915->wq, &boost->work); @@ -7818,11 +7819,9 @@ void intel_pm_setup(struct drm_device *dev) mutex_init(&dev_priv->rps.hw_lock); spin_lock_init(&dev_priv->rps.client_lock); - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, - intel_gen6_powersave_work); + INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, + __intel_autoenable_gt_powersave); INIT_LIST_HEAD(&dev_priv->rps.clients); - INIT_LIST_HEAD(&dev_priv->rps.semaphores.link); - INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); dev_priv->pm.suspended = false; atomic_set(&dev_priv->pm.wakeref_count, 0); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 2b0d1baf15b3..59a21c9d2e43 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -645,9 +645,8 @@ unlock: mutex_unlock(&dev_priv->psr.lock); } -static void intel_psr_exit(struct drm_device *dev) +static void intel_psr_exit(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_dp *intel_dp = dev_priv->psr.enabled; struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; @@ -656,7 +655,7 @@ static void intel_psr_exit(struct drm_device *dev) if (!dev_priv->psr.active) return; - if (HAS_DDI(dev)) { + if (HAS_DDI(dev_priv)) { val = I915_READ(EDP_PSR_CTL); WARN_ON(!(val & EDP_PSR_ENABLE)); @@ -691,7 +690,7 @@ static void intel_psr_exit(struct drm_device *dev) /** * intel_psr_single_frame_update - Single Frame Update - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * Some platforms support a single frame update feature that is used to @@ -699,10 +698,9 @@ static void intel_psr_exit(struct drm_device *dev) * So far it is only implemented for Valleyview and Cherryview because * hardware requires this to be done before a page flip. */ -void intel_psr_single_frame_update(struct drm_device *dev, +void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; u32 val; @@ -711,7 +709,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, * Single frame update is already supported on BDW+ but it requires * many W/A and it isn't really needed. 
*/ - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) return; mutex_lock(&dev_priv->psr.lock); @@ -737,7 +735,7 @@ void intel_psr_single_frame_update(struct drm_device *dev, /** * intel_psr_invalidate - Invalidade PSR - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * * Since the hardware frontbuffer tracking has gaps we need to integrate @@ -747,10 +745,9 @@ void intel_psr_single_frame_update(struct drm_device *dev, * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits." */ -void intel_psr_invalidate(struct drm_device *dev, +void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -767,14 +764,14 @@ void intel_psr_invalidate(struct drm_device *dev, dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits; if (frontbuffer_bits) - intel_psr_exit(dev); + intel_psr_exit(dev_priv); mutex_unlock(&dev_priv->psr.lock); } /** * intel_psr_flush - Flush PSR - * @dev: DRM device + * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits * @origin: which operation caused the flush * @@ -785,10 +782,9 @@ void intel_psr_invalidate(struct drm_device *dev, * * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits. */ -void intel_psr_flush(struct drm_device *dev, +void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc; enum pipe pipe; @@ -806,7 +802,7 @@ void intel_psr_flush(struct drm_device *dev, /* By definition flush = invalidate + flush */ if (frontbuffer_bits) - intel_psr_exit(dev); + intel_psr_exit(dev_priv); if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) if (!work_busy(&dev_priv->psr.work.work)) diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h index 5bd69852752c..08f6fea05a2c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ b/drivers/gpu/drm/i915/intel_renderstate.h @@ -24,12 +24,13 @@ #ifndef _INTEL_RENDERSTATE_H #define _INTEL_RENDERSTATE_H -#include "i915_drv.h" +#include <linux/types.h> -extern const struct intel_renderstate_rodata gen6_null_state; -extern const struct intel_renderstate_rodata gen7_null_state; -extern const struct intel_renderstate_rodata gen8_null_state; -extern const struct intel_renderstate_rodata gen9_null_state; +struct intel_renderstate_rodata { + const u32 *reloc; + const u32 *batch; + const u32 batch_items; +}; #define RO_RENDERSTATE(_g) \ const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ @@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state; .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ } +extern const struct intel_renderstate_rodata gen6_null_state; +extern const struct intel_renderstate_rodata gen7_null_state; +extern const struct intel_renderstate_rodata gen8_null_state; +extern const struct intel_renderstate_rodata gen9_null_state; + #endif /* INTEL_RENDERSTATE_H */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cca7792f26d5..cc5bcd14b6df 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -47,57 +47,44 @@ int __intel_ring_space(int head, int tail, int size) return space - I915_RING_FREE_SPACE; } -void 
intel_ring_update_space(struct intel_ringbuffer *ringbuf) +void intel_ring_update_space(struct intel_ring *ring) { - if (ringbuf->last_retired_head != -1) { - ringbuf->head = ringbuf->last_retired_head; - ringbuf->last_retired_head = -1; + if (ring->last_retired_head != -1) { + ring->head = ring->last_retired_head; + ring->last_retired_head = -1; } - ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR, - ringbuf->tail, ringbuf->size); -} - -static void __intel_ring_advance(struct intel_engine_cs *engine) -{ - struct intel_ringbuffer *ringbuf = engine->buffer; - ringbuf->tail &= ringbuf->size - 1; - engine->write_tail(engine, ringbuf->tail); + ring->space = __intel_ring_space(ring->head & HEAD_ADDR, + ring->tail, ring->size); } static int -gen2_render_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, - u32 flush_domains) +gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; u32 cmd; int ret; cmd = MI_FLUSH; - if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) - cmd |= MI_NO_WRITE_FLUSH; - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + if (mode & EMIT_INVALIDATE) cmd |= MI_READ_FLUSH; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } static int -gen4_render_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, - u32 flush_domains) +gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; u32 cmd; int ret; @@ -129,23 +116,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, * are flushed at any MI_FLUSH. 
*/ - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; - if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) - cmd &= ~MI_NO_WRITE_FLUSH; - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd = MI_FLUSH; + if (mode & EMIT_INVALIDATE) { cmd |= MI_EXE_FLUSH; - - if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(req->i915) || IS_GEN5(req->i915))) - cmd |= MI_INVALIDATE_ISP; + if (IS_G4X(req->i915) || IS_GEN5(req->i915)) + cmd |= MI_INVALIDATE_ISP; + } ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -190,45 +174,46 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, static int intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_ring *ring = req->ring; + u32 scratch_addr = + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ - intel_ring_emit(engine, 0); /* low dword */ - intel_ring_emit(engine, 0); /* high dword */ - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); /* low dword */ + intel_ring_emit(ring, 0); /* high dword */ + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } static int -gen6_render_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, u32 flush_domains) +gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; + u32 scratch_addr = + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -240,7 +225,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, * number of bits based on the write domains has little performance * impact. 
*/ - if (flush_domains) { + if (mode & EMIT_FLUSH) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; /* @@ -249,7 +234,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, */ flags |= PIPE_CONTROL_CS_STALL; } - if (invalidate_domains) { + if (mode & EMIT_INVALIDATE) { flags |= PIPE_CONTROL_TLB_INVALIDATE; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; @@ -266,11 +251,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -278,30 +263,31 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, static int gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_STALL_AT_SCOREBOARD); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } static int -gen7_render_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, u32 flush_domains) +gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; + u32 scratch_addr = + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; - u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* @@ -318,13 +304,13 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, * number of bits based on the write domains has little performance * impact. 
*/ - if (flush_domains) { + if (mode & EMIT_FLUSH) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } - if (invalidate_domains) { + if (mode & EMIT_INVALIDATE) { flags |= PIPE_CONTROL_TLB_INVALIDATE; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; @@ -350,11 +336,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } @@ -363,41 +349,41 @@ static int gen8_emit_pipe_control(struct drm_i915_gem_request *req, u32 flags, u32 scratch_addr) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(engine, flags); - intel_ring_emit(engine, scratch_addr); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, 0); - intel_ring_advance(engine); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, flags); + intel_ring_emit(ring, scratch_addr); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); return 0; } static int -gen8_render_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, u32 flush_domains) +gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) { + u32 scratch_addr = + i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; u32 flags = 0; - u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; flags |= PIPE_CONTROL_CS_STALL; - if (flush_domains) { + if (mode & EMIT_FLUSH) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } - if (invalidate_domains) { + if (mode & EMIT_INVALIDATE) { flags |= PIPE_CONTROL_TLB_INVALIDATE; flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; @@ -419,14 +405,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, return gen8_emit_pipe_control(req, flags, scratch_addr); } -static void ring_write_tail(struct intel_engine_cs *engine, - u32 value) -{ - struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE_TAIL(engine, value); -} - -u64 intel_ring_get_active_head(struct intel_engine_cs *engine) +u64 intel_engine_get_active_head(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; u64 acthd; @@ -488,7 +467,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = RING_HWS_PGA(engine->mmio_base); } - I915_WRITE(mmio, (u32)engine->status_page.gfx_addr); + I915_WRITE(mmio, engine->status_page.ggtt_offset); POSTING_READ(mmio); /* @@ -519,7 +498,7 @@ static bool stop_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(dev_priv)) { + if (INTEL_GEN(dev_priv) > 2) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); if (intel_wait_for_register(dev_priv, RING_MI_MODE(engine->mmio_base), @@ 
-539,9 +518,9 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_CTL(engine, 0); I915_WRITE_HEAD(engine, 0); - engine->write_tail(engine, 0); + I915_WRITE_TAIL(engine, 0); - if (!IS_GEN2(dev_priv)) { + if (INTEL_GEN(dev_priv) > 2) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -549,16 +528,10 @@ static bool stop_ring(struct intel_engine_cs *engine) return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; } -void intel_engine_init_hangcheck(struct intel_engine_cs *engine) -{ - memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); -} - static int init_ring_common(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - struct intel_ringbuffer *ringbuf = engine->buffer; - struct drm_i915_gem_object *obj = ringbuf->obj; + struct intel_ring *ring = engine->buffer; int ret = 0; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -598,7 +571,7 @@ static int init_ring_common(struct intel_engine_cs *engine) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. */ - I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj)); + I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); /* WaClearRingBufHeadRegAtInit:ctg,elk */ if (I915_READ_HEAD(engine)) @@ -608,29 +581,29 @@ static int init_ring_common(struct intel_engine_cs *engine) (void)I915_READ_HEAD(engine); I915_WRITE_CTL(engine, - ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 && - I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) && + I915_READ_START(engine) == i915_ggtt_offset(ring->vma) && (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " - "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", + "ctl %08x (valid? 
%d) head %08x tail %08x start %08x [expected %08x]\n", engine->name, I915_READ_CTL(engine), I915_READ_CTL(engine) & RING_VALID, I915_READ_HEAD(engine), I915_READ_TAIL(engine), I915_READ_START(engine), - (unsigned long)i915_gem_obj_ggtt_offset(obj)); + i915_ggtt_offset(ring->vma)); ret = -EIO; goto out; } - ringbuf->last_retired_head = -1; - ringbuf->head = I915_READ_HEAD(engine); - ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR; - intel_ring_update_space(ringbuf); + ring->last_retired_head = -1; + ring->head = I915_READ_HEAD(engine); + ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR; + intel_ring_update_space(ring); intel_engine_init_hangcheck(engine); @@ -640,59 +613,16 @@ out: return ret; } -void intel_fini_pipe_control(struct intel_engine_cs *engine) -{ - if (engine->scratch.obj == NULL) - return; - - i915_gem_object_ggtt_unpin(engine->scratch.obj); - drm_gem_object_unreference(&engine->scratch.obj->base); - engine->scratch.obj = NULL; -} - -int intel_init_pipe_control(struct intel_engine_cs *engine, int size) -{ - struct drm_i915_gem_object *obj; - int ret; - - WARN_ON(engine->scratch.obj); - - obj = i915_gem_object_create_stolen(&engine->i915->drm, size); - if (!obj) - obj = i915_gem_object_create(&engine->i915->drm, size); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate scratch page\n"); - ret = PTR_ERR(obj); - goto err; - } - - ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH); - if (ret) - goto err_unref; - - engine->scratch.obj = obj; - engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); - DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", - engine->name, engine->scratch.gtt_offset); - return 0; - -err_unref: - drm_gem_object_unreference(&engine->scratch.obj->base); -err: - return ret; -} - static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct i915_workarounds *w = &req->i915->workarounds; int ret, i; if (w->count == 0) return 0; - engine->gpu_caches_dirty = true; - ret = intel_ring_flush_all_caches(req); + ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; @@ -700,17 +630,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_ring_emit_reg(engine, w->reg[i].addr); - intel_ring_emit(engine, w->reg[i].value); + intel_ring_emit_reg(ring, w->reg[i].addr); + intel_ring_emit(ring, w->reg[i].value); } - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(engine); + intel_ring_advance(ring); - engine->gpu_caches_dirty = true; - ret = intel_ring_flush_all_caches(req); + ret = req->engine->emit_flush(req, EMIT_BARRIER); if (ret) return ret; @@ -1178,8 +1107,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2)); - /* WaInsertDummyPushConstPs:bxt */ - if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1222,8 +1151,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); - /* WaInsertDummyPushConstPs:kbl */ - if (IS_KBL_REVID(dev_priv, 0, 
KBL_REVID_B0)) + /* WaToEnableHwFixForPushConstHWBug:kbl */ + if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -1329,191 +1258,194 @@ static void render_ring_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (dev_priv->semaphore_obj) { - i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); - drm_gem_object_unreference(&dev_priv->semaphore_obj->base); - dev_priv->semaphore_obj = NULL; - } - - intel_fini_pipe_control(engine); + i915_vma_unpin_and_release(&dev_priv->semaphore); } -static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, - unsigned int num_dwords) +static int gen8_rcs_signal(struct drm_i915_gem_request *req) { -#define MBOX_UPDATE_DWORDS 8 - struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_i915_private *dev_priv = signaller_req->i915; + struct intel_ring *ring = req->ring; + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); - num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; -#undef MBOX_UPDATE_DWORDS - - ret = intel_ring_begin(signaller_req, num_dwords); + num_rings = INTEL_INFO(dev_priv)->num_rings; + ret = intel_ring_begin(req, (num_rings-1) * 8); if (ret) return ret; for_each_engine_id(waiter, dev_priv, id) { - u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; + u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_CS_STALL); - intel_ring_emit(signaller, lower_32_bits(gtt_offset)); - intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, signaller_req->seqno); - intel_ring_emit(signaller, 0); - intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->hw_id)); - intel_ring_emit(signaller, 0); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, + PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_CS_STALL); + intel_ring_emit(ring, lower_32_bits(gtt_offset)); + intel_ring_emit(ring, upper_32_bits(gtt_offset)); + intel_ring_emit(ring, req->fence.seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, + MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->hw_id)); + intel_ring_emit(ring, 0); } + intel_ring_advance(ring); return 0; } -static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, - unsigned int num_dwords) +static int gen8_xcs_signal(struct drm_i915_gem_request *req) { -#define MBOX_UPDATE_DWORDS 6 - struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_i915_private *dev_priv = signaller_req->i915; + struct intel_ring *ring = req->ring; + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); - num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; -#undef MBOX_UPDATE_DWORDS - - ret = intel_ring_begin(signaller_req, num_dwords); + num_rings = INTEL_INFO(dev_priv)->num_rings; + ret = intel_ring_begin(req, (num_rings-1) * 6); if (ret) return ret; for_each_engine_id(waiter, dev_priv, id) { - u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; + u64 gtt_offset = req->engine->semaphore.signal_ggtt[id]; if 
(gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; - intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | - MI_FLUSH_DW_OP_STOREDW); - intel_ring_emit(signaller, lower_32_bits(gtt_offset) | - MI_FLUSH_DW_USE_GTT); - intel_ring_emit(signaller, upper_32_bits(gtt_offset)); - intel_ring_emit(signaller, signaller_req->seqno); - intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | - MI_SEMAPHORE_TARGET(waiter->hw_id)); - intel_ring_emit(signaller, 0); + intel_ring_emit(ring, + (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); + intel_ring_emit(ring, + lower_32_bits(gtt_offset) | + MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, upper_32_bits(gtt_offset)); + intel_ring_emit(ring, req->fence.seqno); + intel_ring_emit(ring, + MI_SEMAPHORE_SIGNAL | + MI_SEMAPHORE_TARGET(waiter->hw_id)); + intel_ring_emit(ring, 0); } + intel_ring_advance(ring); return 0; } -static int gen6_signal(struct drm_i915_gem_request *signaller_req, - unsigned int num_dwords) +static int gen6_signal(struct drm_i915_gem_request *req) { - struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_i915_private *dev_priv = signaller_req->i915; - struct intel_engine_cs *useless; - enum intel_engine_id id; + struct intel_ring *ring = req->ring; + struct drm_i915_private *dev_priv = req->i915; + struct intel_engine_cs *engine; int ret, num_rings; -#define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); - num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); -#undef MBOX_UPDATE_DWORDS - - ret = intel_ring_begin(signaller_req, num_dwords); + num_rings = INTEL_INFO(dev_priv)->num_rings; + ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2)); if (ret) return ret; - for_each_engine_id(useless, dev_priv, id) { - i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; + for_each_engine(engine, dev_priv) { + i915_reg_t mbox_reg; + if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) + continue; + + mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; if (i915_mmio_reg_valid(mbox_reg)) { - intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(signaller, mbox_reg); - intel_ring_emit(signaller, signaller_req->seqno); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, mbox_reg); + intel_ring_emit(ring, req->fence.seqno); } } /* If num_dwords was rounded, make sure the tail pointer is correct */ if (num_rings % 2 == 0) - intel_ring_emit(signaller, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + +static void i9xx_submit_request(struct drm_i915_gem_request *request) +{ + struct drm_i915_private *dev_priv = request->i915; + + I915_WRITE_TAIL(request->engine, + intel_ring_offset(request->ring, request->tail)); +} + +static int i9xx_emit_request(struct drm_i915_gem_request *req) +{ + struct intel_ring *ring = req->ring; + int ret; + + ret = intel_ring_begin(req, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, req->fence.seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + req->tail = ring->tail; return 0; } /** - * gen6_add_request - Update the semaphore mailbox registers + * gen6_sema_emit_request - Update the semaphore mailbox registers * * @request - request to write to the ring * * Update the mailbox registers in the *other* rings with the current seqno. * This acts like a signal in the canonical semaphore. 
*/ -static int -gen6_add_request(struct drm_i915_gem_request *req) +static int gen6_sema_emit_request(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; int ret; - if (engine->semaphore.signal) - ret = engine->semaphore.signal(req, 4); - else - ret = intel_ring_begin(req, 4); - + ret = req->engine->semaphore.signal(req); if (ret) return ret; - intel_ring_emit(engine, MI_STORE_DWORD_INDEX); - intel_ring_emit(engine, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, req->seqno); - intel_ring_emit(engine, MI_USER_INTERRUPT); - __intel_ring_advance(engine); - - return 0; + return i9xx_emit_request(req); } -static int -gen8_render_add_request(struct drm_i915_gem_request *req) +static int gen8_render_emit_request(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; - if (engine->semaphore.signal) - ret = engine->semaphore.signal(req, 8); - else - ret = intel_ring_begin(req, 8); + if (engine->semaphore.signal) { + ret = engine->semaphore.signal(req); + if (ret) + return ret; + } + + ret = intel_ring_begin(req, 8); if (ret) return ret; - intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_QW_WRITE)); - intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); - intel_ring_emit(engine, 0); - intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(ring, intel_hws_seqno_address(engine)); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, i915_gem_request_get_seqno(req)); /* We're thrashing one dword of HWS. 
*/ - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_USER_INTERRUPT); - intel_ring_emit(engine, MI_NOOP); - __intel_ring_advance(engine); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); - return 0; -} + req->tail = ring->tail; -static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, - u32 seqno) -{ - return dev_priv->last_seqno < seqno; + return 0; } /** @@ -1525,82 +1457,71 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, */ static int -gen8_ring_sync(struct drm_i915_gem_request *waiter_req, - struct intel_engine_cs *signaller, - u32 seqno) +gen8_ring_sync_to(struct drm_i915_gem_request *req, + struct drm_i915_gem_request *signal) { - struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter_req->i915; - u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id); + struct intel_ring *ring = req->ring; + struct drm_i915_private *dev_priv = req->i915; + u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id); struct i915_hw_ppgtt *ppgtt; int ret; - ret = intel_ring_begin(waiter_req, 4); + ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_SAD_GTE_SDD); - intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, lower_32_bits(offset)); - intel_ring_emit(waiter, upper_32_bits(offset)); - intel_ring_advance(waiter); + intel_ring_emit(ring, + MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_SAD_GTE_SDD); + intel_ring_emit(ring, signal->fence.seqno); + intel_ring_emit(ring, lower_32_bits(offset)); + intel_ring_emit(ring, upper_32_bits(offset)); + intel_ring_advance(ring); /* When the !RCS engines idle waiting upon a semaphore, they lose their * pagetables and we must reload them before executing the batch. * We do this on the i915_switch_context() following the wait and * before the dispatch. */ - ppgtt = waiter_req->ctx->ppgtt; - if (ppgtt && waiter_req->engine->id != RCS) - ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine); + ppgtt = req->ctx->ppgtt; + if (ppgtt && req->engine->id != RCS) + ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine); return 0; } static int -gen6_ring_sync(struct drm_i915_gem_request *waiter_req, - struct intel_engine_cs *signaller, - u32 seqno) +gen6_ring_sync_to(struct drm_i915_gem_request *req, + struct drm_i915_gem_request *signal) { - struct intel_engine_cs *waiter = waiter_req->engine; + struct intel_ring *ring = req->ring; u32 dw1 = MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; - u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id]; + u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; int ret; - /* Throughout all of the GEM code, seqno passed implies our current - * seqno is >= the last seqno executed. However for hardware the - * comparison is strictly greater than. 
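gen6_ring_sync_to() above emits signal->fence.seqno - 1 because, as the comment kept in this hunk explains, software treats a request as complete once the mailbox value is greater than or equal to its seqno, while the MI_SEMAPHORE_MBOX compare is strictly greater than. A standalone illustration of that off-by-one (plain user-space C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* what the hardware checks: mailbox strictly greater than the emitted value */
static int hw_semaphore_passes(uint32_t mailbox, uint32_t emitted)
{
        return mailbox > emitted;
}

int main(void)
{
        uint32_t seqno = 100;           /* request we want to wait for */

        /* emitting seqno itself would keep waiting even after 100 lands */
        printf("emit seqno     : passes at mailbox=100? %d\n",
               hw_semaphore_passes(100, seqno));
        /* emitting seqno - 1 releases the waiter as soon as 100 is signalled */
        printf("emit seqno - 1 : passes at mailbox=100? %d\n",
               hw_semaphore_passes(100, seqno - 1));
        return 0;
}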
- */ - seqno -= 1; - WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); - ret = intel_ring_begin(waiter_req, 4); + ret = intel_ring_begin(req, 4); if (ret) return ret; - /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { - intel_ring_emit(waiter, dw1 | wait_mbox); - intel_ring_emit(waiter, seqno); - intel_ring_emit(waiter, 0); - intel_ring_emit(waiter, MI_NOOP); - } else { - intel_ring_emit(waiter, MI_NOOP); - intel_ring_emit(waiter, MI_NOOP); - intel_ring_emit(waiter, MI_NOOP); - intel_ring_emit(waiter, MI_NOOP); - } - intel_ring_advance(waiter); + intel_ring_emit(ring, dw1 | wait_mbox); + /* Throughout all of the GEM code, seqno passed implies our current + * seqno is >= the last seqno executed. However for hardware the + * comparison is strictly greater than. + */ + intel_ring_emit(ring, signal->fence.seqno - 1); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } static void -gen5_seqno_barrier(struct intel_engine_cs *ring) +gen5_seqno_barrier(struct intel_engine_cs *engine) { /* MI_STORE are internally buffered by the GPU and not flushed * either by MI_FLUSH or SyncFlush or any other combination of @@ -1693,40 +1614,18 @@ i8xx_irq_disable(struct intel_engine_cs *engine) } static int -bsd_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate_domains, - u32 flush_domains) +bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, MI_FLUSH); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); - return 0; -} - -static int -i9xx_add_request(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - int ret; - - ret = intel_ring_begin(req, 4); - if (ret) - return ret; - - intel_ring_emit(engine, MI_STORE_DWORD_INDEX); - intel_ring_emit(engine, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(engine, req->seqno); - intel_ring_emit(engine, MI_USER_INTERRUPT); - __intel_ring_advance(engine); - + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } @@ -1788,24 +1687,24 @@ gen8_irq_disable(struct intel_engine_cs *engine) } static int -i965_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 length, - unsigned dispatch_flags) +i965_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 length, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); - intel_ring_emit(engine, offset); - intel_ring_advance(engine); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); return 0; } @@ -1815,12 +1714,12 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req, #define I830_TLB_ENTRIES (2) #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) static int -i830_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 len, - unsigned dispatch_flags) +i830_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; - u32 cs_offset = engine->scratch.gtt_offset; + struct intel_ring *ring = req->ring; + u32 cs_offset = i915_ggtt_offset(req->engine->scratch); int ret; ret = intel_ring_begin(req, 6); @@ -1828,13 +1727,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, return ret; /* Evict the invalid PTE TLBs */ - intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA); - intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); - intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */ - intel_ring_emit(engine, cs_offset); - intel_ring_emit(engine, 0xdeadbeef); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); + intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0xdeadbeef); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { if (len > I830_BATCH_LIMIT) @@ -1848,17 +1747,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, * stable batch scratch bo area (so that the CS never * stumbles over its tlb invalidation bug) ... */ - intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); - intel_ring_emit(engine, + intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); - intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096); - intel_ring_emit(engine, cs_offset); - intel_ring_emit(engine, 4096); - intel_ring_emit(engine, offset); + intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 4096); + intel_ring_emit(ring, offset); - intel_ring_emit(engine, MI_FLUSH); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* ... and execute it. */ offset = cs_offset; @@ -1868,30 +1767,30 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_NON_SECURE)); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
+ 0 : MI_BATCH_NON_SECURE)); + intel_ring_advance(ring); return 0; } static int -i915_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 len, - unsigned dispatch_flags) +i915_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_NON_SECURE)); - intel_ring_advance(engine); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? + 0 : MI_BATCH_NON_SECURE)); + intel_ring_advance(ring); return 0; } @@ -1909,79 +1808,79 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine) static void cleanup_status_page(struct intel_engine_cs *engine) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = engine->status_page.obj; - if (obj == NULL) + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) return; - kunmap(sg_page(obj->pages->sgl)); - i915_gem_object_ggtt_unpin(obj); - drm_gem_object_unreference(&obj->base); - engine->status_page.obj = NULL; + i915_vma_unpin(vma); + i915_gem_object_unpin_map(vma->obj); + i915_vma_put(vma); } static int init_status_page(struct intel_engine_cs *engine) { - struct drm_i915_gem_object *obj = engine->status_page.obj; - - if (obj == NULL) { - unsigned flags; - int ret; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags; + int ret; - obj = i915_gem_object_create(&engine->i915->drm, 4096); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); - return PTR_ERR(obj); - } + obj = i915_gem_object_create(&engine->i915->drm, 4096); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate status page\n"); + return PTR_ERR(obj); + } - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err_unref; - - flags = 0; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actualy map it). - */ - flags |= PIN_MAPPABLE; - ret = i915_gem_obj_ggtt_pin(obj, 4096, flags); - if (ret) { -err_unref: - drm_gem_object_unreference(&obj->base); - return ret; - } + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + if (ret) + goto err; - engine->status_page.obj = obj; + vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; } - engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); - engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); - memset(engine->status_page.page_addr, 0, PAGE_SIZE); + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. 
To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actualy map it). + */ + flags |= PIN_MAPPABLE; + ret = i915_vma_pin(vma, 0, 4096, flags); + if (ret) + goto err; - DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - engine->name, engine->status_page.gfx_addr); + engine->status_page.vma = vma; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma); + engine->status_page.page_addr = + i915_gem_object_pin_map(obj, I915_MAP_WB); + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); return 0; + +err: + i915_gem_object_put(obj); + return ret; } static int init_phys_status_page(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - if (!dev_priv->status_page_dmah) { - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - } + dev_priv->status_page_dmah = + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; memset(engine->status_page.page_addr, 0, PAGE_SIZE); @@ -1989,115 +1888,105 @@ static int init_phys_status_page(struct intel_engine_cs *engine) return 0; } -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +int intel_ring_pin(struct intel_ring *ring) { - GEM_BUG_ON(ringbuf->vma == NULL); - GEM_BUG_ON(ringbuf->virtual_start == NULL); - - if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) - i915_gem_object_unpin_map(ringbuf->obj); - else - i915_vma_unpin_iomap(ringbuf->vma); - ringbuf->virtual_start = NULL; - - i915_gem_object_ggtt_unpin(ringbuf->obj); - ringbuf->vma = NULL; -} - -int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, - struct intel_ringbuffer *ringbuf) -{ - struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ - unsigned flags = PIN_OFFSET_BIAS | 4096; + unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096; + enum i915_map_type map; + struct i915_vma *vma = ring->vma; void *addr; int ret; - if (HAS_LLC(dev_priv) && !obj->stolen) { - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags); - if (ret) - return ret; + GEM_BUG_ON(ring->vaddr); - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) - goto err_unpin; + map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC; - addr = i915_gem_object_pin_map(obj); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_unpin; - } - } else { - ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, - flags | PIN_MAPPABLE); - if (ret) - return ret; + if (vma->obj->stolen) + flags |= PIN_MAPPABLE; - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - goto err_unpin; + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { + if (flags & PIN_MAPPABLE || map == I915_MAP_WC) + ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); + else + ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); + if (unlikely(ret)) + return ret; + } - /* Access through the GTT requires the device to be awake. 
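init_status_page() above now allocates the status page as an i915_vma and pins it with PIN_GLOBAL, adding PIN_MAPPABLE on !LLC platforms so the HWS stays inside the low mappable aperture, per the retained comment. A hedged sketch of just that flag selection, factored into a standalone helper for clarity (the helper is illustrative, not part of this diff):

static unsigned int example_hws_pin_flags(struct drm_i915_private *dev_priv)
{
        unsigned int flags = PIN_GLOBAL;

        /* !LLC parts hang if the HWS lands above the mappable region */
        if (!HAS_LLC(dev_priv))
                flags |= PIN_MAPPABLE;

        return flags;
}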
*/ - assert_rpm_wakelock_held(dev_priv); + ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags); + if (unlikely(ret)) + return ret; - addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_unpin; - } - } + if (i915_vma_is_map_and_fenceable(vma)) + addr = (void __force *)i915_vma_pin_iomap(vma); + else + addr = i915_gem_object_pin_map(vma->obj, map); + if (IS_ERR(addr)) + goto err; - ringbuf->virtual_start = addr; - ringbuf->vma = i915_gem_obj_to_ggtt(obj); + ring->vaddr = addr; return 0; -err_unpin: - i915_gem_object_ggtt_unpin(obj); - return ret; +err: + i915_vma_unpin(vma); + return PTR_ERR(addr); } -static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) +void intel_ring_unpin(struct intel_ring *ring) { - drm_gem_object_unreference(&ringbuf->obj->base); - ringbuf->obj = NULL; + GEM_BUG_ON(!ring->vma); + GEM_BUG_ON(!ring->vaddr); + + if (i915_vma_is_map_and_fenceable(ring->vma)) + i915_vma_unpin_iomap(ring->vma); + else + i915_gem_object_unpin_map(ring->vma->obj); + ring->vaddr = NULL; + + i915_vma_unpin(ring->vma); } -static int intel_alloc_ringbuffer_obj(struct drm_device *dev, - struct intel_ringbuffer *ringbuf) +static struct i915_vma * +intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = NULL; - if (!HAS_LLC(dev)) - obj = i915_gem_object_create_stolen(dev, ringbuf->size); - if (obj == NULL) - obj = i915_gem_object_create(dev, ringbuf->size); + obj = i915_gem_object_create_stolen(&dev_priv->drm, size); + if (!obj) + obj = i915_gem_object_create(&dev_priv->drm, size); if (IS_ERR(obj)) - return PTR_ERR(obj); + return ERR_CAST(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; - ringbuf->obj = obj; + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err; - return 0; + return vma; + +err: + i915_gem_object_put(obj); + return vma; } -struct intel_ringbuffer * -intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size) { - struct intel_ringbuffer *ring; - int ret; + struct intel_ring *ring; + struct i915_vma *vma; + + GEM_BUG_ON(!is_power_of_2(size)); ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (ring == NULL) { - DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", - engine->name); + if (!ring) return ERR_PTR(-ENOMEM); - } ring->engine = engine; - list_add(&ring->link, &engine->buffers); + + INIT_LIST_HEAD(&ring->request_list); ring->size = size; /* Workaround an erratum on the i830 which causes a hang if @@ -2111,22 +2000,21 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring); - if (ret) { - DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", - engine->name, ret); - list_del(&ring->link); + vma = intel_ring_create_vma(engine->i915, size); + if (IS_ERR(vma)) { kfree(ring); - return ERR_PTR(ret); + return ERR_CAST(vma); } + ring->vma = vma; + list_add(&ring->link, &engine->buffers); return ring; } void -intel_ringbuffer_free(struct intel_ringbuffer *ring) +intel_ring_free(struct intel_ring *ring) { - intel_destroy_ringbuffer_obj(ring); + i915_vma_put(ring->vma); list_del(&ring->link); kfree(ring); } @@ -2143,7 +2031,12 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, return 0; if (ce->state) { - ret = 
i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0); + ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false); + if (ret) + goto error; + + ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment, + PIN_GLOBAL | PIN_HIGH); if (ret) goto error; } @@ -2158,7 +2051,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, if (ctx == ctx->i915->kernel_context) ce->initialised = true; - i915_gem_context_reference(ctx); + i915_gem_context_get(ctx); return 0; error: @@ -2177,30 +2070,25 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx, return; if (ce->state) - i915_gem_object_ggtt_unpin(ce->state); + i915_vma_unpin(ce->state); - i915_gem_context_unreference(ctx); + i915_gem_context_put(ctx); } -static int intel_init_ring_buffer(struct drm_device *dev, - struct intel_engine_cs *engine) +static int intel_init_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_ringbuffer *ringbuf; + struct drm_i915_private *dev_priv = engine->i915; + struct intel_ring *ring; int ret; WARN_ON(engine->buffer); - engine->i915 = dev_priv; - INIT_LIST_HEAD(&engine->active_list); - INIT_LIST_HEAD(&engine->request_list); - INIT_LIST_HEAD(&engine->execlist_queue); - INIT_LIST_HEAD(&engine->buffers); - i915_gem_batch_pool_init(dev, &engine->batch_pool); + intel_engine_setup_common(engine); + memset(engine->semaphore.sync_seqno, 0, sizeof(engine->semaphore.sync_seqno)); - ret = intel_engine_init_breadcrumbs(engine); + ret = intel_engine_init_common(engine); if (ret) goto error; @@ -2215,12 +2103,11 @@ static int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto error; - ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); - if (IS_ERR(ringbuf)) { - ret = PTR_ERR(ringbuf); + ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); + if (IS_ERR(ring)) { + ret = PTR_ERR(ring); goto error; } - engine->buffer = ringbuf; if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); @@ -2233,26 +2120,21 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); + ret = intel_ring_pin(ring); if (ret) { - DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", - engine->name, ret); - intel_destroy_ringbuffer_obj(ringbuf); + intel_ring_free(ring); goto error; } - - ret = i915_cmd_parser_init_ring(engine); - if (ret) - goto error; + engine->buffer = ring; return 0; error: - intel_cleanup_engine(engine); + intel_engine_cleanup(engine); return ret; } -void intel_cleanup_engine(struct intel_engine_cs *engine) +void intel_engine_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv; @@ -2262,11 +2144,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) dev_priv = engine->i915; if (engine->buffer) { - intel_stop_engine(engine); - WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(INTEL_GEN(dev_priv) > 2 && + (I915_READ_MODE(engine) & MODE_IDLE) == 0); - intel_unpin_ringbuffer_obj(engine->buffer); - intel_ringbuffer_free(engine->buffer); + intel_ring_unpin(engine->buffer); + intel_ring_free(engine->buffer); engine->buffer = NULL; } @@ -2280,33 +2162,13 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) cleanup_phys_status_page(engine); } - i915_cmd_parser_fini_ring(engine); - i915_gem_batch_pool_fini(&engine->batch_pool); - intel_engine_fini_breadcrumbs(engine); + intel_engine_cleanup_common(engine); intel_ring_context_unpin(dev_priv->kernel_context, engine); 
engine->i915 = NULL; } -int intel_engine_idle(struct intel_engine_cs *engine) -{ - struct drm_i915_gem_request *req; - - /* Wait upon the last request to be completed */ - if (list_empty(&engine->request_list)) - return 0; - - req = list_entry(engine->request_list.prev, - struct drm_i915_gem_request, - list); - - /* Make sure we do not trigger any retires */ - return __i915_wait_request(req, - req->i915->mm.interruptible, - NULL, NULL); -} - int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { int ret; @@ -2317,7 +2179,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) */ request->reserved_space += LEGACY_REQUEST_SIZE; - request->ringbuf = request->engine->buffer; + request->ring = request->engine->buffer; ret = intel_ring_begin(request, 0); if (ret) @@ -2329,12 +2191,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) static int wait_for_space(struct drm_i915_gem_request *req, int bytes) { - struct intel_ringbuffer *ringbuf = req->ringbuf; - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct drm_i915_gem_request *target; + int ret; - intel_ring_update_space(ringbuf); - if (ringbuf->space >= bytes) + intel_ring_update_space(ring); + if (ring->space >= bytes) return 0; /* @@ -2348,35 +2210,38 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) */ GEM_BUG_ON(!req->reserved_space); - list_for_each_entry(target, &engine->request_list, list) { + list_for_each_entry(target, &ring->request_list, ring_link) { unsigned space; - /* - * The request queue is per-engine, so can contain requests - * from multiple ringbuffers. Here, we must ignore any that - * aren't from the ringbuffer we're considering. - */ - if (target->ringbuf != ringbuf) - continue; - /* Would completion of this request free enough space? 
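wait_for_space() above now walks the ring's own request_list and asks, for each request, whether retiring up to it would free enough bytes; the arithmetic is delegated to __intel_ring_space(target->postfix, ring->tail, ring->size), whose body is not part of this diff. Below is a standalone sketch of the circular-buffer computation it is assumed to perform (the 64-byte headroom mirrors the I915_RING_FREE_SPACE reserve declared in the header hunk, and the ring size matches the 32-page ring created in this diff):

#include <stdio.h>

#define RING_SIZE        (32 * 4096)
#define RING_FREE_SPACE  64

/* assumed shape of __intel_ring_space(head, tail, size) for a power-of-two ring */
static int example_ring_space(int head, int tail, int size)
{
        int space = (head - tail) & (size - 1);
        return space - RING_FREE_SPACE;
}

int main(void)
{
        /* producer tail near the end of the ring, candidate postfix near the start */
        printf("space freed by retiring = %d bytes\n",
               example_ring_space(8192, RING_SIZE - 256, RING_SIZE));
        return 0;
}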
*/ - space = __intel_ring_space(target->postfix, ringbuf->tail, - ringbuf->size); + space = __intel_ring_space(target->postfix, ring->tail, + ring->size); if (space >= bytes) break; } - if (WARN_ON(&target->list == &engine->request_list)) + if (WARN_ON(&target->ring_link == &ring->request_list)) return -ENOSPC; - return i915_wait_request(target); + ret = i915_wait_request(target, true, NULL, NO_WAITBOOST); + if (ret) + return ret; + + if (i915_reset_in_progress(&target->i915->gpu_error)) + return -EAGAIN; + + i915_gem_request_retire_upto(target); + + intel_ring_update_space(ring); + GEM_BUG_ON(ring->space < bytes); + return 0; } int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) { - struct intel_ringbuffer *ringbuf = req->ringbuf; - int remain_actual = ringbuf->size - ringbuf->tail; - int remain_usable = ringbuf->effective_size - ringbuf->tail; + struct intel_ring *ring = req->ring; + int remain_actual = ring->size - ring->tail; + int remain_usable = ring->effective_size - ring->tail; int bytes = num_dwords * sizeof(u32); int total_bytes, wait_bytes; bool need_wrap = false; @@ -2403,37 +2268,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) wait_bytes = total_bytes; } - if (wait_bytes > ringbuf->space) { + if (wait_bytes > ring->space) { int ret = wait_for_space(req, wait_bytes); if (unlikely(ret)) return ret; - - intel_ring_update_space(ringbuf); - if (unlikely(ringbuf->space < wait_bytes)) - return -EAGAIN; } if (unlikely(need_wrap)) { - GEM_BUG_ON(remain_actual > ringbuf->space); - GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); + GEM_BUG_ON(remain_actual > ring->space); + GEM_BUG_ON(ring->tail + remain_actual > ring->size); /* Fill the tail with MI_NOOP */ - memset(ringbuf->virtual_start + ringbuf->tail, - 0, remain_actual); - ringbuf->tail = 0; - ringbuf->space -= remain_actual; + memset(ring->vaddr + ring->tail, 0, remain_actual); + ring->tail = 0; + ring->space -= remain_actual; } - ringbuf->space -= bytes; - GEM_BUG_ON(ringbuf->space < 0); + ring->space -= bytes; + GEM_BUG_ON(ring->space < 0); return 0; } /* Align the ring tail to a cacheline boundary */ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->engine; - int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); + struct intel_ring *ring = req->ring; + int num_dwords = + (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); int ret; if (num_dwords == 0) @@ -2445,61 +2306,16 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) return ret; while (num_dwords--) - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(engine); + intel_ring_advance(ring); return 0; } -void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) +static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) { - struct drm_i915_private *dev_priv = engine->i915; - - /* Our semaphore implementation is strictly monotonic (i.e. we proceed - * so long as the semaphore value in the register/page is greater - * than the sync value), so whenever we reset the seqno, - * so long as we reset the tracking semaphore value to 0, it will - * always be before the next request's seqno. If we don't reset - * the semaphore value, then when the seqno moves backwards all - * future waits will complete instantly (causing rendering corruption). 
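intel_ring_begin() above computes remain_actual (bytes to the physical end of the ring) and remain_usable (bytes to effective_size), then, in a part of the function this hunk does not show, decides whether the packet must wrap; when it does, the whole physical remainder is padded with MI_NOOP and the tail restarts at 0, which is why ring->space is charged remain_actual on top of the packet itself. A simplified standalone illustration of that accounting (the wrap condition here is an assumption consistent with the visible code and it ignores the reserved-space bookkeeping):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int size = 4096;                        /* illustrative ring size */
        int effective_size = size - 2 * 64;     /* illustrative headroom below the physical end */
        int tail = 3920;
        int bytes = 16 * 4;                     /* 16 dwords requested */

        int remain_actual = size - tail;
        int remain_usable = effective_size - tail;
        bool need_wrap = bytes > remain_usable;             /* assumed condition */
        int wait_bytes = need_wrap ? bytes + remain_actual : bytes;

        printf("need_wrap=%d wait_bytes=%d (pad %d bytes of MI_NOOP, tail -> 0)\n",
               need_wrap, wait_bytes, need_wrap ? remain_actual : 0);
        return 0;
}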
- */ - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { - I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); - I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); - if (HAS_VEBOX(dev_priv)) - I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); - } - if (dev_priv->semaphore_obj) { - struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; - struct page *page = i915_gem_object_get_dirty_page(obj, 0); - void *semaphores = kmap(page); - memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), - 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); - kunmap(page); - } - memset(engine->semaphore.sync_seqno, 0, - sizeof(engine->semaphore.sync_seqno)); - - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - engine->last_submitted_seqno = seqno; - - engine->hangcheck.seqno = seqno; - - /* After manually advancing the seqno, fake the interrupt in case - * there are any waiters for that seqno. - */ - rcu_read_lock(); - intel_engine_wakeup(engine); - rcu_read_unlock(); -} - -static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, - u32 value) -{ - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *dev_priv = request->i915; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -2523,8 +2339,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ - I915_WRITE_FW(RING_TAIL(engine->mmio_base), value); - POSTING_READ_FW(RING_TAIL(engine->mmio_base)); + i9xx_submit_request(request); /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. @@ -2535,10 +2350,9 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate, u32 flush) +static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; uint32_t cmd; int ret; @@ -2563,30 +2377,29 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, * operation is complete. This bit is only valid when the * Post-Sync Operation field is a value of 1h or 3h." 
*/ - if (invalidate & I915_GEM_GPU_DOMAINS) + if (mode & EMIT_INVALIDATE) cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, - I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_GEN(req->i915) >= 8) { - intel_ring_emit(engine, 0); /* upper addr */ - intel_ring_emit(engine, 0); /* value */ + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ } else { - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); } - intel_ring_advance(engine); + intel_ring_advance(ring); return 0; } static int -gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 len, - unsigned dispatch_flags) +gen8_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; - bool ppgtt = USES_PPGTT(engine->dev) && + struct intel_ring *ring = req->ring; + bool ppgtt = USES_PPGTT(req->i915) && !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -2595,71 +2408,70 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, return ret; /* FIXME(BDW): Address space and security selectors. */ - intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | + intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | (dispatch_flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0)); - intel_ring_emit(engine, lower_32_bits(offset)); - intel_ring_emit(engine, upper_32_bits(offset)); - intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(engine); + intel_ring_emit(ring, lower_32_bits(offset)); + intel_ring_emit(ring, upper_32_bits(offset)); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); return 0; } static int -hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 len, - unsigned dispatch_flags) +hsw_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | (dispatch_flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0)); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(engine, offset); - intel_ring_advance(engine); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); return 0; } static int -gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, - u64 offset, u32 len, - unsigned dispatch_flags) +gen6_emit_bb_start(struct drm_i915_gem_request *req, + u64 offset, u32 len, + unsigned int dispatch_flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(engine, + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE_I965)); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(engine, offset); - intel_ring_advance(engine); + intel_ring_emit(ring, offset); + intel_ring_advance(ring); return 0; } /* Blitter support (SandyBridge+) */ -static int gen6_ring_flush(struct drm_i915_gem_request *req, - u32 invalidate, u32 flush) +static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; uint32_t cmd; int ret; @@ -2684,19 +2496,19 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, * operation is complete. This bit is only valid when the * Post-Sync Operation field is a value of 1h or 3h." */ - if (invalidate & I915_GEM_DOMAIN_RENDER) + if (mode & EMIT_INVALIDATE) cmd |= MI_INVALIDATE_TLB; - intel_ring_emit(engine, cmd); - intel_ring_emit(engine, + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_GEN(req->i915) >= 8) { - intel_ring_emit(engine, 0); /* upper addr */ - intel_ring_emit(engine, 0); /* value */ + intel_ring_emit(ring, 0); /* upper addr */ + intel_ring_emit(ring, 0); /* value */ } else { - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); } - intel_ring_advance(engine); + intel_ring_advance(ring); return 0; } @@ -2707,38 +2519,39 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; int ret, i; - if (!i915_semaphore_is_enabled(dev_priv)) + if (!i915.semaphores) return; - if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) { + if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { + struct i915_vma *vma; + obj = i915_gem_object_create(&dev_priv->drm, 4096); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK); - if (ret != 0) { - drm_gem_object_unreference(&obj->base); - DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); - i915.semaphores = 0; - } else { - dev_priv->semaphore_obj = obj; - } - } - } + if (IS_ERR(obj)) + goto err; - if (!i915_semaphore_is_enabled(dev_priv)) - return; + vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL); + if (IS_ERR(vma)) + goto err_obj; + + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) + goto err_obj; + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_obj; + + dev_priv->semaphore = vma; + } if (INTEL_GEN(dev_priv) >= 8) { - u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj); + u32 offset = i915_ggtt_offset(dev_priv->semaphore); - engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.sync_to = gen8_ring_sync_to; engine->semaphore.signal = gen8_xcs_signal; for (i = 0; i < I915_NUM_ENGINES; i++) { - u64 ring_offset; + u32 ring_offset; if (i != engine->id) ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i); @@ -2748,7 +2561,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, engine->semaphore.signal_ggtt[i] = ring_offset; } } else if (INTEL_GEN(dev_priv) >= 6) { - engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.sync_to = gen6_ring_sync_to; engine->semaphore.signal = gen6_signal; /* @@ -2758,52 +2571,62 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, * initialized as INVALID. 
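The gen8 branch of intel_ring_init_semaphores() above fills engine->semaphore.signal_ggtt[] with one GGTT address per (signaller, waiter) pair, derived from GEN8_SEMAPHORE_OFFSET() as defined later in the intel_ringbuffer.h hunk: slot index = from * I915_NUM_ENGINES + to, each slot padded to a qword per the header comment. A standalone sketch of that layout (plain C; the engine ids used are illustrative):

#include <stdio.h>

#define NUM_ENGINES      5      /* I915_NUM_ENGINES in the header hunk */
#define SEMA_SEQNO_SIZE  8      /* seqno is a u32, padded to a qword per the header comment */

static unsigned int gen8_sema_offset(unsigned int from, unsigned int to)
{
        return (from * NUM_ENGINES + to) * SEMA_SEQNO_SIZE;
}

int main(void)
{
        unsigned int rcs = 0, vcs = 1;  /* illustrative engine ids */

        printf("RCS signals VCS at +0x%02x from the semaphore vma\n",
               gen8_sema_offset(rcs, vcs));
        printf("VCS signals RCS at +0x%02x from the semaphore vma\n",
               gen8_sema_offset(vcs, rcs));
        return 0;
}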
Gen8 will initialize the * sema between VCS2 and RCS later. */ - for (i = 0; i < I915_NUM_ENGINES; i++) { + for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) { static const struct { u32 wait_mbox; i915_reg_t mbox_reg; - } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = { - [RCS] = { - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, + } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = { + [RCS_HW] = { + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC }, }, - [VCS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, + [VCS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC }, }, - [BCS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, - [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, + [BCS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC }, + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC }, + [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC }, }, - [VECS] = { - [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, - [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, - [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, + [VECS_HW] = { + [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC }, + [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC }, + [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC }, }, }; u32 wait_mbox; i915_reg_t mbox_reg; - if (i == engine->id || i == VCS2) { + if (i == engine->hw_id) { wait_mbox = MI_SEMAPHORE_SYNC_INVALID; mbox_reg = GEN6_NOSYNC; } else { - wait_mbox = sem_data[engine->id][i].wait_mbox; - mbox_reg = sem_data[engine->id][i].mbox_reg; + wait_mbox = sem_data[engine->hw_id][i].wait_mbox; + mbox_reg = sem_data[engine->hw_id][i].mbox_reg; } engine->semaphore.mbox.wait[i] = wait_mbox; engine->semaphore.mbox.signal[i] = mbox_reg; } } + + return; + +err_obj: + i915_gem_object_put(obj); +err: + DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); + i915.semaphores = 0; } static void intel_ring_init_irq(struct drm_i915_private *dev_priv, struct intel_engine_cs *engine) { + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift; + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable = gen8_irq_enable; engine->irq_disable = gen8_irq_disable; @@ -2828,83 +2651,75 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv, static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, struct intel_engine_cs *engine) { + intel_ring_init_irq(dev_priv, engine); + intel_ring_init_semaphores(dev_priv, engine); + engine->init_hw = init_ring_common; - engine->write_tail = 
ring_write_tail; - engine->add_request = i9xx_add_request; - if (INTEL_GEN(dev_priv) >= 6) - engine->add_request = gen6_add_request; + engine->emit_request = i9xx_emit_request; + if (i915.semaphores) + engine->emit_request = gen6_sema_emit_request; + engine->submit_request = i9xx_submit_request; if (INTEL_GEN(dev_priv) >= 8) - engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + engine->emit_bb_start = gen8_emit_bb_start; else if (INTEL_GEN(dev_priv) >= 6) - engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + engine->emit_bb_start = gen6_emit_bb_start; else if (INTEL_GEN(dev_priv) >= 4) - engine->dispatch_execbuffer = i965_dispatch_execbuffer; + engine->emit_bb_start = i965_emit_bb_start; else if (IS_I830(dev_priv) || IS_845G(dev_priv)) - engine->dispatch_execbuffer = i830_dispatch_execbuffer; + engine->emit_bb_start = i830_emit_bb_start; else - engine->dispatch_execbuffer = i915_dispatch_execbuffer; - - intel_ring_init_irq(dev_priv, engine); - intel_ring_init_semaphores(dev_priv, engine); + engine->emit_bb_start = i915_emit_bb_start; } -int intel_init_render_ring_buffer(struct drm_device *dev) +int intel_init_render_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + struct drm_i915_private *dev_priv = engine->i915; int ret; - engine->name = "render ring"; - engine->id = RCS; - engine->exec_id = I915_EXEC_RENDER; - engine->hw_id = 0; - engine->mmio_base = RENDER_RING_BASE; - intel_ring_default_vfuncs(dev_priv, engine); - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; if (HAS_L3_DPF(dev_priv)) engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; if (INTEL_GEN(dev_priv) >= 8) { engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen8_render_add_request; - engine->flush = gen8_render_ring_flush; - if (i915_semaphore_is_enabled(dev_priv)) + engine->emit_request = gen8_render_emit_request; + engine->emit_flush = gen8_render_ring_flush; + if (i915.semaphores) engine->semaphore.signal = gen8_rcs_signal; } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; - engine->flush = gen7_render_ring_flush; + engine->emit_flush = gen7_render_ring_flush; if (IS_GEN6(dev_priv)) - engine->flush = gen6_render_ring_flush; + engine->emit_flush = gen6_render_ring_flush; } else if (IS_GEN5(dev_priv)) { - engine->flush = gen4_render_ring_flush; + engine->emit_flush = gen4_render_ring_flush; } else { if (INTEL_GEN(dev_priv) < 4) - engine->flush = gen2_render_ring_flush; + engine->emit_flush = gen2_render_ring_flush; else - engine->flush = gen4_render_ring_flush; + engine->emit_flush = gen4_render_ring_flush; engine->irq_enable_mask = I915_USER_INTERRUPT; } if (IS_HASWELL(dev_priv)) - engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; + engine->emit_bb_start = hsw_emit_bb_start; engine->init_hw = init_render_ring; engine->cleanup = render_ring_cleanup; - ret = intel_init_ring_buffer(dev, engine); + ret = intel_init_ring_buffer(engine); if (ret) return ret; if (INTEL_GEN(dev_priv) >= 6) { - ret = intel_init_pipe_control(engine, 4096); + ret = intel_engine_create_scratch(engine, 4096); if (ret) return ret; } else if (HAS_BROKEN_CS_TLB(dev_priv)) { - ret = intel_init_pipe_control(engine, I830_WA_SIZE); + ret = intel_engine_create_scratch(engine, I830_WA_SIZE); if (ret) return ret; } @@ -2912,166 +2727,71 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return 0; } -int intel_init_bsd_ring_buffer(struct drm_device 
*dev) +int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine = &dev_priv->engine[VCS]; - - engine->name = "bsd ring"; - engine->id = VCS; - engine->exec_id = I915_EXEC_BSD; - engine->hw_id = 1; + struct drm_i915_private *dev_priv = engine->i915; intel_ring_default_vfuncs(dev_priv, engine); if (INTEL_GEN(dev_priv) >= 6) { - engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ if (IS_GEN6(dev_priv)) - engine->write_tail = gen6_bsd_ring_write_tail; - engine->flush = gen6_bsd_ring_flush; - if (INTEL_GEN(dev_priv) >= 8) - engine->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - else + engine->submit_request = gen6_bsd_submit_request; + engine->emit_flush = gen6_bsd_ring_flush; + if (INTEL_GEN(dev_priv) < 8) engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; } else { engine->mmio_base = BSD_RING_BASE; - engine->flush = bsd_ring_flush; + engine->emit_flush = bsd_ring_flush; if (IS_GEN5(dev_priv)) engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; else engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; } - return intel_init_ring_buffer(dev, engine); + return intel_init_ring_buffer(engine); } /** * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3) */ -int intel_init_bsd2_ring_buffer(struct drm_device *dev) +int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; - - engine->name = "bsd2 ring"; - engine->id = VCS2; - engine->exec_id = I915_EXEC_BSD; - engine->hw_id = 4; - engine->mmio_base = GEN8_BSD2_RING_BASE; + struct drm_i915_private *dev_priv = engine->i915; intel_ring_default_vfuncs(dev_priv, engine); - engine->flush = gen6_bsd_ring_flush; - engine->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; + engine->emit_flush = gen6_bsd_ring_flush; - return intel_init_ring_buffer(dev, engine); + return intel_init_ring_buffer(engine); } -int intel_init_blt_ring_buffer(struct drm_device *dev) +int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine = &dev_priv->engine[BCS]; - - engine->name = "blitter ring"; - engine->id = BCS; - engine->exec_id = I915_EXEC_BLT; - engine->hw_id = 2; - engine->mmio_base = BLT_RING_BASE; + struct drm_i915_private *dev_priv = engine->i915; intel_ring_default_vfuncs(dev_priv, engine); - engine->flush = gen6_ring_flush; - if (INTEL_GEN(dev_priv) >= 8) - engine->irq_enable_mask = - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - else + engine->emit_flush = gen6_ring_flush; + if (INTEL_GEN(dev_priv) < 8) engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; - return intel_init_ring_buffer(dev, engine); + return intel_init_ring_buffer(engine); } -int intel_init_vebox_ring_buffer(struct drm_device *dev) +int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_engine_cs *engine = &dev_priv->engine[VECS]; - - engine->name = "video enhancement ring"; - engine->id = VECS; - engine->exec_id = I915_EXEC_VEBOX; - engine->hw_id = 3; - engine->mmio_base = VEBOX_RING_BASE; + struct drm_i915_private *dev_priv = engine->i915; intel_ring_default_vfuncs(dev_priv, engine); - engine->flush = gen6_ring_flush; + engine->emit_flush = gen6_ring_flush; - if (INTEL_GEN(dev_priv) >= 8) { - engine->irq_enable_mask = - 
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - } else { + if (INTEL_GEN(dev_priv) < 8) { engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; engine->irq_enable = hsw_vebox_irq_enable; engine->irq_disable = hsw_vebox_irq_disable; } - return intel_init_ring_buffer(dev, engine); -} - -int -intel_ring_flush_all_caches(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - int ret; - - if (!engine->gpu_caches_dirty) - return 0; - - ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS); - if (ret) - return ret; - - trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); - - engine->gpu_caches_dirty = false; - return 0; -} - -int -intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) -{ - struct intel_engine_cs *engine = req->engine; - uint32_t flush_domains; - int ret; - - flush_domains = 0; - if (engine->gpu_caches_dirty) - flush_domains = I915_GEM_GPU_DOMAINS; - - ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); - if (ret) - return ret; - - trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); - - engine->gpu_caches_dirty = false; - return 0; -} - -void -intel_stop_engine(struct intel_engine_cs *engine) -{ - int ret; - - if (!intel_engine_initialized(engine)) - return; - - ret = intel_engine_idle(engine); - if (ret) - DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", - engine->name, ret); - - stop_ring(engine); + return intel_init_ring_buffer(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 12cb7ed90014..84aea549de5d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -3,6 +3,7 @@ #include <linux/hashtable.h> #include "i915_gem_batch_pool.h" +#include "i915_gem_request.h" #define I915_CMD_HASH_ORDER 9 @@ -25,29 +26,29 @@ */ #define I915_RING_FREE_SPACE 64 -struct intel_hw_status_page { - u32 *page_addr; - unsigned int gfx_addr; - struct drm_i915_gem_object *obj; +struct intel_hw_status_page { + struct i915_vma *vma; + u32 *page_addr; + u32 ggtt_offset; }; -#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) -#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) +#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base)) +#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val) -#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) -#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) +#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base)) +#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val) -#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) -#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) +#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base)) +#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val) -#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) -#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) +#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base)) +#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val) -#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) -#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) +#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base)) 
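The accessor macros above are renamed to take an engine rather than a ring, but they still expand to MMIO accesses relative to engine->mmio_base. A hedged usage sketch (the debug helper is illustrative, not part of this diff; it assumes a local dev_priv, which the I915_READ family expects to find in scope):

static void example_dump_ring_regs(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;   /* needed by I915_READ() */

        DRM_DEBUG_DRIVER("%s HEAD=0x%08x TAIL=0x%08x CTL=0x%08x MODE=0x%08x\n",
                         engine->name,
                         I915_READ_HEAD(engine),
                         I915_READ_TAIL(engine),
                         I915_READ_CTL(engine),
                         I915_READ_MODE(engine));
}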
+#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val) -#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) -#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) +#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base)) +#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val) /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. @@ -56,13 +57,13 @@ struct intel_hw_status_page { #define GEN8_SEMAPHORE_OFFSET(__from, __to) \ (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size) #define GEN8_SIGNAL_OFFSET(__ring, to) \ - (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + (dev_priv->semaphore->node.start + \ GEN8_SEMAPHORE_OFFSET((__ring)->id, (to))) #define GEN8_WAIT_OFFSET(__ring, from) \ - (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ + (dev_priv->semaphore->node.start + \ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) -enum intel_ring_hangcheck_action { +enum intel_engine_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, HANGCHECK_ACTIVE, @@ -72,24 +73,24 @@ enum intel_ring_hangcheck_action { #define HANGCHECK_SCORE_RING_HUNG 31 -struct intel_ring_hangcheck { +struct intel_engine_hangcheck { u64 acthd; - unsigned long user_interrupts; u32 seqno; int score; - enum intel_ring_hangcheck_action action; + enum intel_engine_hangcheck_action action; int deadlock; u32 instdone[I915_NUM_INSTDONE_REG]; }; -struct intel_ringbuffer { - struct drm_i915_gem_object *obj; - void __iomem *virtual_start; +struct intel_ring { struct i915_vma *vma; + void *vaddr; struct intel_engine_cs *engine; struct list_head link; + struct list_head request_list; + u32 head; u32 tail; int space; @@ -121,12 +122,12 @@ struct drm_i915_reg_table; * an option for future use. * size: size of the batch in DWORDS */ -struct i915_ctx_workarounds { +struct i915_ctx_workarounds { struct i915_wa_ctx_bb { u32 offset; u32 size; } indirect_ctx, per_ctx; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; }; struct drm_i915_gem_request; @@ -144,10 +145,18 @@ struct intel_engine_cs { #define I915_NUM_ENGINES 5 #define _VCS(n) (VCS + (n)) unsigned int exec_id; - unsigned int hw_id; - unsigned int guc_id; /* XXX same as hw_id? */ + enum intel_engine_hw_id { + RCS_HW = 0, + VCS_HW, + BCS_HW, + VECS_HW, + VCS2_HW + } hw_id; + enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */ + u64 fence_context; u32 mmio_base; - struct intel_ringbuffer *buffer; + unsigned int irq_shift; + struct intel_ring *buffer; struct list_head buffers; /* Rather than have every client wait upon all user interrupts, @@ -167,8 +176,7 @@ struct intel_engine_cs { * the overhead of waking that client is much preferred. 
*/ struct intel_breadcrumbs { - struct task_struct *irq_seqno_bh; /* bh for user interrupts */ - unsigned long irq_wakeups; + struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */ bool irq_posted; spinlock_t lock; /* protects the lists of requests */ @@ -178,6 +186,9 @@ struct intel_engine_cs { struct task_struct *signaler; /* used for fence signalling */ struct drm_i915_gem_request *first_signal; struct timer_list fake_irq; /* used after a missed interrupt */ + struct timer_list hangcheck; /* detect missed interrupts */ + + unsigned long timeout; bool irq_enabled : 1; bool rpm_wakelock : 1; @@ -192,36 +203,38 @@ struct intel_engine_cs { struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; + struct i915_vma *scratch; u32 irq_keep_mask; /* always keep these interrupts */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ - void (*irq_enable)(struct intel_engine_cs *ring); - void (*irq_disable)(struct intel_engine_cs *ring); + void (*irq_enable)(struct intel_engine_cs *engine); + void (*irq_disable)(struct intel_engine_cs *engine); - int (*init_hw)(struct intel_engine_cs *ring); + int (*init_hw)(struct intel_engine_cs *engine); int (*init_context)(struct drm_i915_gem_request *req); - void (*write_tail)(struct intel_engine_cs *ring, - u32 value); - int __must_check (*flush)(struct drm_i915_gem_request *req, - u32 invalidate_domains, - u32 flush_domains); - int (*add_request)(struct drm_i915_gem_request *req); + int (*emit_flush)(struct drm_i915_gem_request *request, + u32 mode); +#define EMIT_INVALIDATE BIT(0) +#define EMIT_FLUSH BIT(1) +#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) + int (*emit_bb_start)(struct drm_i915_gem_request *req, + u64 offset, u32 length, + unsigned int dispatch_flags); +#define I915_DISPATCH_SECURE BIT(0) +#define I915_DISPATCH_PINNED BIT(1) +#define I915_DISPATCH_RS BIT(2) + int (*emit_request)(struct drm_i915_gem_request *req); + void (*submit_request)(struct drm_i915_gem_request *req); /* Some chipsets are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last * seen value is good enough. Note that the seqno will always be * monotonic, even if not coherent. */ - void (*irq_seqno_barrier)(struct intel_engine_cs *ring); - int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, - u64 offset, u32 length, - unsigned dispatch_flags); -#define I915_DISPATCH_SECURE 0x1 -#define I915_DISPATCH_PINNED 0x2 -#define I915_DISPATCH_RS 0x4 - void (*cleanup)(struct intel_engine_cs *ring); + void (*irq_seqno_barrier)(struct intel_engine_cs *engine); + void (*cleanup)(struct intel_engine_cs *engine); /* GEN8 signal/wait table - never trust comments! 
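The emit_flush hook above replaces the old (invalidate_domains, flush_domains) pair with a single mode bitmask; EMIT_BARRIER is simply the OR of EMIT_INVALIDATE and EMIT_FLUSH, and implementations in the .c hunks test individual bits (for example, gen6_ring_flush() only sets MI_INVALIDATE_TLB when mode carries EMIT_INVALIDATE). A hedged usage sketch of calling the hook; the wrapper name is illustrative, not part of this diff:

static int example_emit_full_barrier(struct drm_i915_gem_request *req)
{
        /* flush prior writes and invalidate caches before the next batch */
        return req->engine->emit_flush(req, EMIT_BARRIER);
}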
* signal to signal to signal to signal to signal to @@ -264,22 +277,22 @@ struct intel_engine_cs { u32 sync_seqno[I915_NUM_ENGINES-1]; union { +#define GEN6_SEMAPHORE_LAST VECS_HW +#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1) +#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0) struct { /* our mbox written by others */ - u32 wait[I915_NUM_ENGINES]; + u32 wait[GEN6_NUM_SEMAPHORES]; /* mboxes this ring signals to */ - i915_reg_t signal[I915_NUM_ENGINES]; + i915_reg_t signal[GEN6_NUM_SEMAPHORES]; } mbox; u64 signal_ggtt[I915_NUM_ENGINES]; }; /* AKA wait() */ - int (*sync_to)(struct drm_i915_gem_request *to_req, - struct intel_engine_cs *from, - u32 seqno); - int (*signal)(struct drm_i915_gem_request *signaller_req, - /* num_dwords needed by caller */ - unsigned int num_dwords); + int (*sync_to)(struct drm_i915_gem_request *req, + struct drm_i915_gem_request *signal); + int (*signal)(struct drm_i915_gem_request *req); } semaphore; /* Execlists */ @@ -291,24 +304,6 @@ struct intel_engine_cs { unsigned int idle_lite_restore_wa; bool disable_lite_restore_wa; u32 ctx_desc_template; - int (*emit_request)(struct drm_i915_gem_request *request); - int (*emit_flush)(struct drm_i915_gem_request *request, - u32 invalidate_domains, - u32 flush_domains); - int (*emit_bb_start)(struct drm_i915_gem_request *req, - u64 offset, unsigned dispatch_flags); - - /** - * List of objects currently involved in rendering from the - * ringbuffer. - * - * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_read_req - * represents when the rendering involved will be completed. - * - * A reference is held on the buffer while on this list. - */ - struct list_head active_list; /** * List of breadcrumbs associated with GPU requests currently @@ -323,22 +318,22 @@ struct intel_engine_cs { */ u32 last_submitted_seqno; - bool gpu_caches_dirty; + /* An RCU guarded pointer to the last request. No reference is + * held to the request, users must carefully acquire a reference to + * the request using i915_gem_active_get_rcu(), or hold the + * struct_mutex. + */ + struct i915_gem_active last_request; struct i915_gem_context *last_context; - struct intel_ring_hangcheck hangcheck; - - struct { - struct drm_i915_gem_object *obj; - u32 gtt_offset; - } scratch; + struct intel_engine_hangcheck hangcheck; bool needs_cmd_parser; /* * Table of commands the command parser needs to know about - * for this ring. + * for this engine. */ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); @@ -352,11 +347,11 @@ struct intel_engine_cs { * Returns the bitmask for the length field of the specified command. * Return 0 for an unrecognized/invalid command. * - * If the command parser finds an entry for a command in the ring's + * If the command parser finds an entry for a command in the engine's * cmd_tables, it gets the command's length based on the table entry. - * If not, it calls this function to determine the per-ring length field - * encoding for the command (i.e. certain opcode ranges use certain bits - * to encode the command length in the header). + * If not, it calls this function to determine the per-engine length + * field encoding for the command (i.e. different opcode ranges use + * certain bits to encode the command length in the header). 
*/ u32 (*get_cmd_length_mask)(u32 cmd_header); }; @@ -374,8 +369,8 @@ intel_engine_flag(const struct intel_engine_cs *engine) } static inline u32 -intel_ring_sync_index(struct intel_engine_cs *engine, - struct intel_engine_cs *other) +intel_engine_sync_index(struct intel_engine_cs *engine, + struct intel_engine_cs *other) { int idx; @@ -437,55 +432,74 @@ intel_write_status_page(struct intel_engine_cs *engine, #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) -struct intel_ringbuffer * -intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); -int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, - struct intel_ringbuffer *ringbuf); -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); -void intel_ringbuffer_free(struct intel_ringbuffer *ring); +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size); +int intel_ring_pin(struct intel_ring *ring); +void intel_ring_unpin(struct intel_ring *ring); +void intel_ring_free(struct intel_ring *ring); -void intel_stop_engine(struct intel_engine_cs *engine); -void intel_cleanup_engine(struct intel_engine_cs *engine); +void intel_engine_stop(struct intel_engine_cs *engine); +void intel_engine_cleanup(struct intel_engine_cs *engine); int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); -static inline void intel_ring_emit(struct intel_engine_cs *engine, - u32 data) + +static inline void intel_ring_emit(struct intel_ring *ring, u32 data) { - struct intel_ringbuffer *ringbuf = engine->buffer; - iowrite32(data, ringbuf->virtual_start + ringbuf->tail); - ringbuf->tail += 4; + *(uint32_t *)(ring->vaddr + ring->tail) = data; + ring->tail += 4; } -static inline void intel_ring_emit_reg(struct intel_engine_cs *engine, - i915_reg_t reg) + +static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg) { - intel_ring_emit(engine, i915_mmio_reg_offset(reg)); + intel_ring_emit(ring, i915_mmio_reg_offset(reg)); } -static inline void intel_ring_advance(struct intel_engine_cs *engine) + +static inline void intel_ring_advance(struct intel_ring *ring) { - struct intel_ringbuffer *ringbuf = engine->buffer; - ringbuf->tail &= ringbuf->size - 1; + /* Dummy function. + * + * This serves as a placeholder in the code so that the reader + * can compare against the preceding intel_ring_begin() and + * check that the number of dwords emitted matches the space + * reserved for the command packet (i.e. the value passed to + * intel_ring_begin()). + */ } + +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value) +{ + /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. 
*/ + return value & (ring->size - 1); +} + int __intel_ring_space(int head, int tail, int size); -void intel_ring_update_space(struct intel_ringbuffer *ringbuf); +void intel_ring_update_space(struct intel_ring *ring); -int __must_check intel_engine_idle(struct intel_engine_cs *engine); -void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); -int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); -int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); +void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno); -int intel_init_pipe_control(struct intel_engine_cs *engine, int size); -void intel_fini_pipe_control(struct intel_engine_cs *engine); +void intel_engine_setup_common(struct intel_engine_cs *engine); +int intel_engine_init_common(struct intel_engine_cs *engine); +int intel_engine_create_scratch(struct intel_engine_cs *engine, int size); +void intel_engine_cleanup_common(struct intel_engine_cs *engine); -int intel_init_render_ring_buffer(struct drm_device *dev); -int intel_init_bsd_ring_buffer(struct drm_device *dev); -int intel_init_bsd2_ring_buffer(struct drm_device *dev); -int intel_init_blt_ring_buffer(struct drm_device *dev); -int intel_init_vebox_ring_buffer(struct drm_device *dev); +static inline int intel_engine_idle(struct intel_engine_cs *engine, + bool interruptible) +{ + /* Wait upon the last request to be completed */ + return i915_gem_active_wait_unlocked(&engine->last_request, + interruptible, NULL, NULL); +} -u64 intel_ring_get_active_head(struct intel_engine_cs *engine); +int intel_init_render_ring_buffer(struct intel_engine_cs *engine); +int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); +int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine); +int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); +int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); + +u64 intel_engine_get_active_head(struct intel_engine_cs *engine); static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) { return intel_read_status_page(engine, I915_GEM_HWS_INDEX); @@ -493,11 +507,6 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) int init_workarounds_ring(struct intel_engine_cs *engine); -static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) -{ - return ringbuf->tail; -} - /* * Arbitrary size for largest possible 'add request' sequence. The code paths * are complex and variable. 
Empirical measurement shows that the worst case @@ -509,21 +518,10 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) { - return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; + return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR; } /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ -struct intel_wait { - struct rb_node node; - struct task_struct *tsk; - u32 seqno; -}; - -struct intel_signal_node { - struct rb_node node; - struct intel_wait wait; -}; - int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); static inline void intel_wait_init(struct intel_wait *wait, u32 seqno) @@ -543,31 +541,42 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, struct intel_wait *wait); void intel_engine_enable_signaling(struct drm_i915_gem_request *request); -static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine) +static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) { - return READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh); } -static inline bool intel_engine_wakeup(struct intel_engine_cs *engine) +static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine) { bool wakeup = false; - struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh); + /* Note that for this not to dangerously chase a dangling pointer, - * the caller is responsible for ensure that the task remain valid for - * wake_up_process() i.e. that the RCU grace period cannot expire. + * we must hold the rcu_read_lock here. * * Also note that tsk is likely to be in !TASK_RUNNING state so an * early test for tsk->state != TASK_RUNNING before wake_up_process() * is unlikely to be beneficial. 
*/ - if (tsk) - wakeup = wake_up_process(tsk); + if (intel_engine_has_waiter(engine)) { + struct task_struct *tsk; + + rcu_read_lock(); + tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh); + if (tsk) + wakeup = wake_up_process(tsk); + rcu_read_unlock(); + } + return wakeup; } -void intel_engine_enable_fake_irq(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); unsigned int intel_kick_waiters(struct drm_i915_private *i915); unsigned int intel_kick_signalers(struct drm_i915_private *i915); +static inline bool intel_engine_is_active(struct intel_engine_cs *engine) +{ + return i915_gem_active_isset(&engine->last_request); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 1c603bbe5784..a1d73c2de332 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -592,6 +592,8 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Disabling DC9\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + intel_pps_unlock_regs_wa(dev_priv); } static void assert_csr_loaded(struct drm_i915_private *dev_priv) @@ -854,7 +856,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { enum skl_disp_power_wells power_well_id = power_well->data; - struct i915_power_well *cmn_a_well; + struct i915_power_well *cmn_a_well = NULL; if (power_well_id == BXT_DPIO_CMN_BC) { /* @@ -867,7 +869,7 @@ static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well)); - if (power_well_id == BXT_DPIO_CMN_BC) + if (cmn_a_well) intel_power_well_put(dev_priv, cmn_a_well); } @@ -1121,6 +1123,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) } i915_redisable_vga_power_on(&dev_priv->drm); + + intel_pps_unlock_regs_wa(dev_priv); } static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7c08e4f29032..366900dcde34 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -36,6 +36,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_plane_helper.h> #include "intel_drv.h" +#include "intel_frontbuffer.h" #include <drm/i915_drm.h> #include "i915_drv.h" @@ -202,23 +203,21 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(drm_plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; - u32 plane_ctl, stride_div, stride; + u32 plane_ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr; - u32 tile_height, plane_offset, plane_size; + u32 surf_addr = plane_state->main.offset; unsigned int rotation = plane_state->base.rotation; - int x_offset, y_offset; - int crtc_x = plane_state->dst.x1; - int crtc_y = plane_state->dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->dst); - uint32_t crtc_h = drm_rect_height(&plane_state->dst); - uint32_t x = plane_state->src.x1 >> 16; - uint32_t y = plane_state->src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + u32 stride = skl_plane_stride(fb, 0, 
rotation); + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -229,15 +228,6 @@ skl_update_plane(struct drm_plane *drm_plane, plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (key->flags) { I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); @@ -249,28 +239,15 @@ skl_update_plane(struct drm_plane *drm_plane, else if (key->flags & I915_SET_COLORKEY_SOURCE) plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; - surf_addr = intel_plane_obj_offset(intel_plane, obj, 0); - - if (intel_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - - /* stride: Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); - stride = DIV_ROUND_UP(fb->height, tile_height); - plane_size = (src_w << 16) | src_h; - x_offset = stride * tile_height - y - (src_h + 1); - y_offset = x; - } else { - stride = fb->pitches[0] / stride_div; - plane_size = (src_h << 16) | src_w; - x_offset = x; - y_offset = y; - } - plane_offset = y_offset << 16 | x_offset; + /* Sizes are 0 based */ + src_w--; + src_h--; + crtc_w--; + crtc_h--; - I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset); + I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); I915_WRITE(PLANE_STRIDE(pipe, plane), stride); - I915_WRITE(PLANE_SIZE(pipe, plane), plane_size); + I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w); /* program plane scaler */ if (plane_state->scaler_id >= 0) { @@ -295,7 +272,8 @@ skl_update_plane(struct drm_plane *drm_plane, } I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); - I915_WRITE(PLANE_SURF(pipe, plane), surf_addr); + I915_WRITE(PLANE_SURF(pipe, plane), + intel_fb_gtt_offset(fb, rotation) + surf_addr); POSTING_READ(PLANE_SURF(pipe, plane)); } @@ -362,22 +340,20 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(dplane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; u32 sprsurf_offset, linear_offset; unsigned int rotation = dplane->state->rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->dst.x1; - int crtc_y = plane_state->dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->dst); - uint32_t crtc_h = drm_rect_height(&plane_state->dst); - uint32_t x = plane_state->src.x1 >> 16; - uint32_t y = plane_state->src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->base.src.x1 >> 16; + uint32_t y = 
plane_state->base.src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; sprctl = SP_ENABLE; @@ -430,7 +406,7 @@ vlv_update_plane(struct drm_plane *dplane, */ sprctl |= SP_GAMMA_ENABLE; - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) sprctl |= SP_TILED; /* Sizes are 0 based */ @@ -439,19 +415,18 @@ vlv_update_plane(struct drm_plane *dplane, crtc_w--; crtc_h--; - linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= sprsurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == BIT(DRM_ROTATE_180)) { + if (rotation == DRM_ROTATE_180) { sprctl |= SP_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); @@ -467,7 +442,7 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); else I915_WRITE(SPLINOFF(pipe, plane), linear_offset); @@ -476,8 +451,8 @@ vlv_update_plane(struct drm_plane *dplane, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + - sprsurf_offset); + I915_WRITE(SPSURF(pipe, plane), + intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -505,21 +480,19 @@ ivb_update_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; u32 sprsurf_offset, linear_offset; unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->dst.x1; - int crtc_y = plane_state->dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->dst); - uint32_t crtc_h = drm_rect_height(&plane_state->dst); - uint32_t x = plane_state->src.x1 >> 16; - uint32_t y = plane_state->src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->base.src.x1 >> 16; + uint32_t y = plane_state->base.src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; sprctl = SPRITE_ENABLE; @@ -552,7 +525,7 @@ ivb_update_plane(struct drm_plane *plane, */ sprctl |= SPRITE_GAMMA_ENABLE; - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) sprctl |= SPRITE_TILED; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) @@ 
-572,22 +545,21 @@ ivb_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= sprsurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == BIT(DRM_ROTATE_180)) { + if (rotation == DRM_ROTATE_180) { sprctl |= SPRITE_ROTATE_180; /* HSW and BDW does this automagically in hardware */ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { I915_WRITE(SPRKEYVAL(pipe), key->min_value); I915_WRITE(SPRKEYMAX(pipe), key->max_value); @@ -606,7 +578,7 @@ ivb_update_plane(struct drm_plane *plane, * register */ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) I915_WRITE(SPROFFSET(pipe), (y << 16) | x); - else if (obj->tiling_mode != I915_TILING_NONE) + else if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); else I915_WRITE(SPRLINOFF(pipe), linear_offset); @@ -616,7 +588,7 @@ ivb_update_plane(struct drm_plane *plane, I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRSURF(pipe), - i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); + intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); } @@ -646,21 +618,19 @@ ilk_update_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; u32 dvscntr, dvsscale; u32 dvssurf_offset, linear_offset; unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->dst.x1; - int crtc_y = plane_state->dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->dst); - uint32_t crtc_h = drm_rect_height(&plane_state->dst); - uint32_t x = plane_state->src.x1 >> 16; - uint32_t y = plane_state->src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->base.src.x1 >> 16; + uint32_t y = plane_state->base.src.y1 >> 16; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; dvscntr = DVS_ENABLE; @@ -693,7 +663,7 @@ ilk_update_plane(struct drm_plane *plane, */ dvscntr |= DVS_GAMMA_ENABLE; - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) dvscntr |= DVS_TILED; if (IS_GEN6(dev)) @@ -709,19 +679,18 @@ ilk_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * cpp; - dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= dvssurf_offset; + intel_add_fb_offsets(&x, &y, plane_state, 0); + 
dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == BIT(DRM_ROTATE_180)) { + if (rotation == DRM_ROTATE_180) { dvscntr |= DVS_ROTATE_180; x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * cpp; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + if (key->flags) { I915_WRITE(DVSKEYVAL(pipe), key->min_value); I915_WRITE(DVSKEYMAX(pipe), key->max_value); @@ -736,7 +705,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier[0] == I915_FORMAT_MOD_X_TILED) I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); else I915_WRITE(DVSLINOFF(pipe), linear_offset); @@ -745,7 +714,7 @@ ilk_update_plane(struct drm_plane *plane, I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSSURF(pipe), - i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); + intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } @@ -778,15 +747,26 @@ intel_check_sprite_plane(struct drm_plane *plane, int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; uint32_t src_x, src_y, src_w, src_h; - struct drm_rect *src = &state->src; - struct drm_rect *dst = &state->dst; + struct drm_rect *src = &state->base.src; + struct drm_rect *dst = &state->base.dst; const struct drm_rect *clip = &state->clip; int hscale, vscale; int max_scale, min_scale; bool can_scale; + int ret; + + src->x1 = state->base.src_x; + src->y1 = state->base.src_y; + src->x2 = state->base.src_x + state->base.src_w; + src->y2 = state->base.src_y + state->base.src_h; + + dst->x1 = state->base.crtc_x; + dst->y1 = state->base.crtc_y; + dst->x2 = state->base.crtc_x + state->base.crtc_w; + dst->y2 = state->base.crtc_y + state->base.crtc_h; if (!fb) { - state->visible = false; + state->base.visible = false; return 0; } @@ -834,14 +814,14 @@ intel_check_sprite_plane(struct drm_plane *plane, vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); BUG_ON(vscale < 0); - state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); + state->base.visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale); crtc_x = dst->x1; crtc_y = dst->y1; crtc_w = drm_rect_width(dst); crtc_h = drm_rect_height(dst); - if (state->visible) { + if (state->base.visible) { /* check again in case clipping clamped the results */ hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); if (hscale < 0) { @@ -898,12 +878,12 @@ intel_check_sprite_plane(struct drm_plane *plane, crtc_w &= ~1; if (crtc_w == 0) - state->visible = false; + state->base.visible = false; } } /* Check size restrictions when scaling */ - if (state->visible && (src_w != crtc_w || src_h != crtc_h)) { + if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) { unsigned int width_bytes; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); @@ -912,10 +892,10 @@ intel_check_sprite_plane(struct drm_plane *plane, /* FIXME interlacing min height is 6 */ if (crtc_w < 3 || crtc_h < 3) - state->visible = false; + state->base.visible = false; if (src_w < 3 || src_h < 3) - state->visible = false; + state->base.visible = false; width_bytes = ((src_x * cpp) & 63) + src_w * cpp; @@ -926,7 +906,7 @@ intel_check_sprite_plane(struct drm_plane *plane, } } - if (state->visible) { + if (state->base.visible) { src->x1 = src_x << 16; src->x2 = (src_x + src_w) << 16; src->y1 = src_y << 16; @@ -938,6 +918,12 @@ intel_check_sprite_plane(struct drm_plane 
*plane, dst->y1 = crtc_y; dst->y2 = crtc_y + crtc_h; + if (INTEL_GEN(dev) >= 9) { + ret = skl_check_plane_surface(state); + if (ret) + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index ff80a81b1a84..43f833901b8e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv) i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_disable_gt_powersave(dev_priv); + intel_sanitize_gt_powersave(dev_priv); } static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, @@ -1618,8 +1618,10 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv, * @timeout_ms: timeout in milliseconds * * This routine waits until the target register @reg contains the expected - * @value after applying the @mask, i.e. it waits until - * (I915_READ_FW(@reg) & @mask) == @value + * @value after applying the @mask, i.e. it waits until :: + * + * (I915_READ_FW(reg) & mask) == value + * * Otherwise, the wait will time out after @timeout_ms milliseconds. * * Note that this routine assumes the caller holds forcewake asserted, it is @@ -1652,8 +1654,10 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, * @timeout_ms: timeout in milliseconds * * This routine waits until the target register @reg contains the expected - * @value after applying the @mask, i.e. it waits until - * (I915_READ(@reg) & @mask) == @value + * @value after applying the @mask, i.e. it waits until :: + * + * (I915_READ(reg) & mask) == value + * * Otherwise, the wait will time out after @timeout_ms milliseconds. * * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
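Illustrative aside (not part of the patch above): the intel_wait_for_register()/intel_wait_for_register_fw() kerneldoc describes polling a register until (read(reg) & mask) == value, giving up after timeout_ms. The short userspace C sketch below only illustrates that wait pattern with made-up names (poll_register_masked, read_fake_register); it is not the i915 implementation, which uses its own I915_READ()/wait_for() machinery.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for a hardware register read, e.g. I915_READ(reg). */
static uint32_t read_fake_register(const volatile uint32_t *reg)
{
	return *reg;
}

/* Returns 0 on match, -1 if timeout_ms elapsed first (analogous to -ETIMEDOUT). */
static int poll_register_masked(const volatile uint32_t *reg, uint32_t mask,
				uint32_t value, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((read_fake_register(reg) & mask) == value)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= (long)timeout_ms)
			return -1;
	}
}

int main(void)
{
	volatile uint32_t fake_reg = 0x5;	/* bit 0 already set */

	/* Wait for bit 0 to become 1 within a 10 ms budget; prints 0. */
	printf("%d\n", poll_register_masked(&fake_reg, 0x1, 0x1, 10));
	return 0;
}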
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9f7dafce3a4c..56dfc4cd50c6 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -16,7 +16,6 @@ #include <linux/component.h> #include <linux/device.h> #include <linux/dma-buf.h> -#include <linux/fb.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/reservation.h> @@ -58,12 +57,6 @@ static int legacyfb_depth = 16; module_param(legacyfb_depth, int, 0444); #endif -unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc) -{ - return drm_crtc_index(crtc->crtc); -} -EXPORT_SYMBOL_GPL(imx_drm_crtc_id); - static void imx_drm_driver_lastclose(struct drm_device *drm) { struct imx_drm_device *imxdrm = drm->dev_private; @@ -90,24 +83,6 @@ static int imx_drm_driver_unload(struct drm_device *drm) return 0; } -int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) -{ - return drm_crtc_vblank_get(imx_drm_crtc->crtc); -} -EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get); - -void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc) -{ - drm_crtc_vblank_put(imx_drm_crtc->crtc); -} -EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put); - -void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc) -{ - drm_crtc_handle_vblank(imx_drm_crtc->crtc); -} -EXPORT_SYMBOL_GPL(imx_drm_handle_vblank); - static int imx_drm_enable_vblank(struct drm_device *drm, unsigned int pipe) { struct imx_drm_device *imxdrm = drm->dev_private; @@ -218,7 +193,8 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(dev, state); - drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_planes(dev, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_modeset_enables(dev, state); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 07d33e45f90f..5a91cb16c8fa 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -13,8 +13,6 @@ struct drm_plane; struct imx_drm_crtc; struct platform_device; -unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc); - struct imx_crtc_state { struct drm_crtc_state base; u32 bus_format; @@ -44,10 +42,6 @@ int imx_drm_init_drm(struct platform_device *pdev, int preferred_bpp); int imx_drm_exit_drm(void); -int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc); -void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc); -void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc); - void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index b03919ed60ba..4eed3a6addad 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -57,7 +57,11 @@ struct imx_ldb_channel { struct imx_ldb *ldb; struct drm_connector connector; struct drm_encoder encoder; + + /* Defines what is connected to the ldb, only one at a time */ struct drm_panel *panel; + struct drm_bridge *bridge; + struct device_node *child; struct i2c_adapter *ddc; int chno; @@ -66,6 +70,7 @@ struct imx_ldb_channel { struct drm_display_mode mode; int mode_valid; u32 bus_format; + u32 bus_flags; }; static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) @@ -251,11 +256,13 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) drm_panel_enable(imx_ldb_ch->panel); } -static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, - struct 
drm_display_mode *orig_mode, - struct drm_display_mode *mode) +static void +imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *connector_state) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; unsigned long serial_clk; @@ -297,17 +304,11 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, } if (!bus_format) { - struct drm_connector *connector; - - drm_for_each_connector(connector, encoder->dev) { - struct drm_display_info *di = &connector->display_info; + struct drm_connector *connector = connector_state->connector; + struct drm_display_info *di = &connector->display_info; - if (connector->encoder == encoder && - di->num_bus_formats) { - bus_format = di->bus_formats[0]; - break; - } - } + if (di->num_bus_formats) + bus_format = di->bus_formats[0]; } imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format); } @@ -379,8 +380,13 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, u32 bus_format = imx_ldb_ch->bus_format; /* Bus format description in DT overrides connector display info. */ - if (!bus_format && di->num_bus_formats) + if (!bus_format && di->num_bus_formats) { bus_format = di->bus_formats[0]; + imx_crtc_state->bus_flags = di->bus_flags; + } else { + bus_format = imx_ldb_ch->bus_format; + imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags; + } switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; @@ -420,7 +426,7 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = { }; static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { - .mode_set = imx_ldb_encoder_mode_set, + .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, .disable = imx_ldb_encoder_disable, .atomic_check = imx_ldb_encoder_atomic_check, @@ -466,10 +472,30 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs, DRM_MODE_ENCODER_LVDS, NULL); - drm_connector_helper_add(&imx_ldb_ch->connector, - &imx_ldb_connector_helper_funcs); - drm_connector_init(drm, &imx_ldb_ch->connector, - &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (imx_ldb_ch->bridge) { + imx_ldb_ch->bridge->encoder = encoder; + + imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge; + ret = drm_bridge_attach(drm, imx_ldb_ch->bridge); + if (ret) { + DRM_ERROR("Failed to initialize bridge with drm\n"); + return ret; + } + } else { + /* + * We want to add the connector whenever there is no bridge + * that brings its own, not only when there is a panel. For + * historical reasons, the ldb driver can also work without + * a panel. 
+ */ + drm_connector_helper_add(&imx_ldb_ch->connector, + &imx_ldb_connector_helper_funcs); + drm_connector_init(drm, &imx_ldb_ch->connector, + &imx_ldb_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); + drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, + encoder); + } if (imx_ldb_ch->panel) { ret = drm_panel_attach(imx_ldb_ch->panel, @@ -478,8 +504,6 @@ static int imx_ldb_register(struct drm_device *drm, return ret; } - drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder); - return 0; } @@ -548,6 +572,46 @@ static const struct of_device_id imx_ldb_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids); +static int imx_ldb_panel_ddc(struct device *dev, + struct imx_ldb_channel *channel, struct device_node *child) +{ + struct device_node *ddc_node; + const u8 *edidp; + int ret; + + ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); + if (ddc_node) { + channel->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); + if (!channel->ddc) { + dev_warn(dev, "failed to get ddc i2c adapter\n"); + return -EPROBE_DEFER; + } + } + + if (!channel->ddc) { + /* if no DDC available, fallback to hardcoded EDID */ + dev_dbg(dev, "no ddc available\n"); + + edidp = of_get_property(child, "edid", + &channel->edid_len); + if (edidp) { + channel->edid = kmemdup(edidp, + channel->edid_len, + GFP_KERNEL); + } else if (!channel->panel) { + /* fallback to display-timings node */ + ret = of_get_drm_display_mode(child, + &channel->mode, + &channel->bus_flags, + OF_USE_NATIVE_MODE); + if (!ret) + channel->mode_valid = 1; + } + } + return 0; +} + static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; @@ -555,7 +619,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; - const u8 *edidp; struct imx_ldb *imx_ldb; int dual; int ret; @@ -605,7 +668,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; - struct device_node *ddc_node; struct device_node *ep; int bus_format; @@ -638,46 +700,25 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) remote = of_graph_get_remote_port_parent(ep); of_node_put(ep); - if (remote) + if (remote) { channel->panel = of_drm_find_panel(remote); - else + channel->bridge = of_drm_find_bridge(remote); + } else return -EPROBE_DEFER; of_node_put(remote); - if (!channel->panel) { - dev_err(dev, "panel not found: %s\n", - remote->full_name); - return -EPROBE_DEFER; - } - } - ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); - if (ddc_node) { - channel->ddc = of_find_i2c_adapter_by_node(ddc_node); - of_node_put(ddc_node); - if (!channel->ddc) { - dev_warn(dev, "failed to get ddc i2c adapter\n"); + if (!channel->panel && !channel->bridge) { + dev_err(dev, "panel/bridge not found: %s\n", + remote->full_name); return -EPROBE_DEFER; } } - if (!channel->ddc) { - /* if no DDC available, fallback to hardcoded EDID */ - dev_dbg(dev, "no ddc available\n"); - - edidp = of_get_property(child, "edid", - &channel->edid_len); - if (edidp) { - channel->edid = kmemdup(edidp, - channel->edid_len, - GFP_KERNEL); - } else if (!channel->panel) { - /* fallback to display-timings node */ - ret = of_get_drm_display_mode(child, - &channel->mode, - OF_USE_NATIVE_MODE); - if (!ret) - channel->mode_valid = 1; - } + /* panel ddc only if there is no bridge */ + if 
(!channel->bridge) { + ret = imx_ldb_panel_ddc(dev, channel, child); + if (ret) + return ret; } bus_format = of_get_bus_format(dev, child); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 08e188bc10fc..6e1dc902522c 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -21,7 +21,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> -#include <linux/fb.h> #include <linux/clk.h> #include <linux/errno.h> #include <drm/drm_gem_cma_helper.h> @@ -134,7 +133,7 @@ static irqreturn_t ipu_irq_handler(int irq, void *dev_id) { struct ipu_crtc *ipu_crtc = dev_id; - imx_drm_handle_vblank(ipu_crtc->imx_crtc); + drm_crtc_handle_vblank(&ipu_crtc->base); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 1dad297b01fd..74b0ac06fdab 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -33,6 +33,7 @@ struct imx_parallel_display { void *edid; int edid_len; u32 bus_format; + u32 bus_flags; struct drm_display_mode mode; struct drm_panel *panel; struct drm_bridge *bridge; @@ -80,6 +81,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) return -EINVAL; ret = of_get_drm_display_mode(np, &imxpd->mode, + &imxpd->bus_flags, OF_USE_NATIVE_MODE); if (ret) return ret; @@ -125,11 +127,13 @@ static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder, struct drm_display_info *di = &conn_state->connector->display_info; struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - imx_crtc_state->bus_flags = di->bus_flags; - if (!imxpd->bus_format && di->num_bus_formats) + if (!imxpd->bus_format && di->num_bus_formats) { + imx_crtc_state->bus_flags = di->bus_flags; imx_crtc_state->bus_format = di->bus_formats[0]; - else + } else { + imx_crtc_state->bus_flags = imxpd->bus_flags; imx_crtc_state->bus_format = imxpd->bus_format; + } imx_crtc_state->di_hsync_pin = 2; imx_crtc_state->di_vsync_pin = 3; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 8f62671fcfbf..019b7ca392d7 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -103,7 +103,8 @@ static void mtk_ovl_stop(struct mtk_ddp_comp *comp) } static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh) + unsigned int h, unsigned int vrefresh, + unsigned int bpc) { if (w != 0 && h != 0) writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 5fb80cbe4c5b..0df05f95b916 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -106,7 +106,8 @@ static void mtk_rdma_stop(struct mtk_ddp_comp *comp) } static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, - unsigned int height, unsigned int vrefresh) + unsigned int height, unsigned int vrefresh, + unsigned int bpc) { unsigned int threshold; unsigned int reg; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 24aa3bad1e76..01a21dd835b5 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -31,7 +31,7 @@ * struct mtk_drm_crtc - MediaTek specific crtc structure. * @base: crtc object. 
* @enabled: records whether crtc_enable succeeded - * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane + * @planes: array of 4 drm_plane structures, one for each overlay plane * @pending_planes: whether any plane has pending changes to be applied * @config_regs: memory mapped mmsys configuration register space * @mutex: handle to one of the ten disp_mutex streams @@ -45,7 +45,7 @@ struct mtk_drm_crtc { bool pending_needs_vblank; struct drm_pending_vblank_event *event; - struct mtk_drm_plane planes[OVL_LAYER_NR]; + struct drm_plane planes[OVL_LAYER_NR]; bool pending_planes; void __iomem *config_regs; @@ -112,8 +112,7 @@ static void mtk_drm_crtc_reset(struct drm_crtc *crtc) struct mtk_crtc_state *state; if (crtc->state) { - if (crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); + __drm_atomic_helper_crtc_destroy_state(crtc->state); state = to_mtk_crtc_state(crtc->state); memset(state, 0, sizeof(*state)); @@ -222,7 +221,9 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; - unsigned int width, height, vrefresh; + struct drm_connector *connector; + struct drm_encoder *encoder; + unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC; int ret; int i; @@ -234,6 +235,19 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) height = crtc->state->adjusted_mode.vdisplay; vrefresh = crtc->state->adjusted_mode.vrefresh; + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + drm_for_each_connector(connector, crtc->dev) { + if (connector->encoder != encoder) + continue; + if (connector->display_info.bpc != 0 && + bpc > connector->display_info.bpc) + bpc = connector->display_info.bpc; + } + } + ret = pm_runtime_get_sync(crtc->dev->dev); if (ret < 0) { DRM_ERROR("Failed to enable power domain: %d\n", ret); @@ -266,13 +280,13 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; - mtk_ddp_comp_config(comp, width, height, vrefresh); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); mtk_ddp_comp_start(comp); } /* Initially configure all planes */ for (i = 0; i < OVL_LAYER_NR; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i].base; + struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); @@ -351,7 +365,7 @@ static void mtk_drm_crtc_disable(struct drm_crtc *crtc) /* Set all pending plane state to disabled */ for (i = 0; i < OVL_LAYER_NR; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i].base; + struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); @@ -397,7 +411,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (mtk_crtc->event) mtk_crtc->pending_needs_vblank = true; for (i = 0; i < OVL_LAYER_NR; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i].base; + struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); @@ -409,6 +423,9 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, } if (pending_planes) mtk_crtc->pending_planes = true; + if (crtc->state->color_mgmt_changed) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); } static const struct 
drm_crtc_funcs mtk_crtc_funcs = { @@ -418,6 +435,7 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = { .reset = mtk_drm_crtc_reset, .atomic_duplicate_state = mtk_drm_crtc_duplicate_state, .atomic_destroy_state = mtk_drm_crtc_destroy_state, + .gamma_set = drm_atomic_helper_legacy_gamma_set, }; static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = { @@ -464,14 +482,14 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) if (state->pending_config) { mtk_ddp_comp_config(ovl, state->pending_width, state->pending_height, - state->pending_vrefresh); + state->pending_vrefresh, 0); state->pending_config = false; } if (mtk_crtc->pending_planes) { for (i = 0; i < OVL_LAYER_NR; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i].base; + struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; plane_state = to_mtk_plane_state(plane->state); @@ -559,16 +577,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : DRM_PLANE_TYPE_OVERLAY; ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos], - BIT(pipe), type, zpos); + BIT(pipe), type); if (ret) goto unprepare; } - ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base, - &mtk_crtc->planes[1].base, pipe); + ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], + &mtk_crtc->planes[1], pipe); if (ret < 0) goto unprepare; - + drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); + drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE); priv->crtc[pipe] = &mtk_crtc->base; priv->num_pipes++; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index 81e5566ec82f..a1550fa3c9d2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -19,10 +19,12 @@ #include "mtk_drm_plane.h" #define OVL_LAYER_NR 4 +#define MTK_LUT_SIZE 512 +#define MTK_MAX_BPC 10 +#define MTK_MIN_BPC 3 int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe); void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe); -void mtk_drm_crtc_check_flush(struct drm_crtc *crtc); void mtk_drm_crtc_commit(struct drm_crtc *crtc); void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); int mtk_drm_crtc_create(struct drm_device *drm_dev, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 3970fcf0f05f..df33b3ca6ffd 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -24,12 +24,17 @@ #include "mtk_drm_drv.h" #include "mtk_drm_plane.h" #include "mtk_drm_ddp_comp.h" +#include "mtk_drm_crtc.h" #define DISP_OD_EN 0x0000 #define DISP_OD_INTEN 0x0008 #define DISP_OD_INTSTA 0x000c #define DISP_OD_CFG 0x0020 #define DISP_OD_SIZE 0x0030 +#define DISP_DITHER_5 0x0114 +#define DISP_DITHER_7 0x011c +#define DISP_DITHER_15 0x013c +#define DISP_DITHER_16 0x0140 #define DISP_REG_UFO_START 0x0000 @@ -38,15 +43,69 @@ #define DISP_COLOR_WIDTH 0x0c50 #define DISP_COLOR_HEIGHT 0x0c54 -#define OD_RELAY_MODE BIT(0) +#define DISP_AAL_EN 0x0000 +#define DISP_AAL_SIZE 0x0030 -#define UFO_BYPASS BIT(2) +#define DISP_GAMMA_EN 0x0000 +#define DISP_GAMMA_CFG 0x0020 +#define DISP_GAMMA_SIZE 0x0030 +#define DISP_GAMMA_LUT 0x0700 -#define COLOR_BYPASS_ALL BIT(7) -#define COLOR_SEQ_SEL BIT(13) +#define LUT_10BIT_MASK 0x03ff + +#define COLOR_BYPASS_ALL BIT(7) +#define COLOR_SEQ_SEL BIT(13) + +#define OD_RELAYMODE BIT(0) + +#define 
UFO_BYPASS BIT(2) + +#define AAL_EN BIT(0) + +#define GAMMA_EN BIT(0) +#define GAMMA_LUT_EN BIT(1) + +#define DISP_DITHERING BIT(2) +#define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) +#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24) +#define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) +#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16) +#define DITHER_NEW_BIT_MODE BIT(0) +#define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) +#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24) +#define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) +#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16) +#define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) +#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8) +#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) +#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) + +void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, + unsigned int CFG) +{ + /* If bpc is 0, dithering is not enabled */ + if (bpc == 0) + return; + + if (bpc >= MTK_MIN_BPC) { + writel(0, comp->regs + DISP_DITHER_5); + writel(0, comp->regs + DISP_DITHER_7); + writel(DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | + DITHER_NEW_BIT_MODE, + comp->regs + DISP_DITHER_15); + writel(DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | + DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | + DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), + comp->regs + DISP_DITHER_16); + writel(DISP_DITHERING, comp->regs + CFG); + } +} static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh) + unsigned int h, unsigned int vrefresh, + unsigned int bpc) { writel(w, comp->regs + DISP_COLOR_WIDTH); writel(h, comp->regs + DISP_COLOR_HEIGHT); @@ -60,14 +119,16 @@ static void mtk_color_start(struct mtk_ddp_comp *comp) } static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh) + unsigned int h, unsigned int vrefresh, + unsigned int bpc) { writel(w << 16 | h, comp->regs + DISP_OD_SIZE); + writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); + mtk_dither_set(comp, bpc, DISP_OD_CFG); } static void mtk_od_start(struct mtk_ddp_comp *comp) { - writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG); writel(1, comp->regs + DISP_OD_EN); } @@ -76,6 +137,78 @@ static void mtk_ufoe_start(struct mtk_ddp_comp *comp) { writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START); } +static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_AAL_SIZE); +} + +static void mtk_aal_start(struct mtk_ddp_comp *comp) +{ + writel(AAL_EN, comp->regs + DISP_AAL_EN); +} + +static void mtk_aal_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_AAL_EN); +} + +static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_GAMMA_SIZE); + mtk_dither_set(comp, bpc, DISP_GAMMA_CFG); +} + +static void mtk_gamma_start(struct mtk_ddp_comp *comp) +{ + writel(GAMMA_EN, comp->regs + DISP_GAMMA_EN); +} + +static void mtk_gamma_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_GAMMA_EN); +} + +static void mtk_gamma_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + unsigned int i, reg; + struct drm_color_lut *lut; + void __iomem *lut_base; + u32 word; + + if (state->gamma_lut) { + reg = readl(comp->regs +
DISP_GAMMA_CFG); + reg = reg | GAMMA_LUT_EN; + writel(reg, comp->regs + DISP_GAMMA_CFG); + lut_base = comp->regs + DISP_GAMMA_LUT; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < MTK_LUT_SIZE; i++) { + word = (((lut[i].red >> 6) & LUT_10BIT_MASK) << 20) + + (((lut[i].green >> 6) & LUT_10BIT_MASK) << 10) + + ((lut[i].blue >> 6) & LUT_10BIT_MASK); + writel(word, (lut_base + i * 4)); + } + } +} + +static const struct mtk_ddp_comp_funcs ddp_aal = { + .gamma_set = mtk_gamma_set, + .config = mtk_aal_config, + .start = mtk_aal_start, + .stop = mtk_aal_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_gamma = { + .gamma_set = mtk_gamma_set, + .config = mtk_gamma_config, + .start = mtk_gamma_start, + .stop = mtk_gamma_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_color = { .config = mtk_color_config, .start = mtk_color_start, @@ -112,13 +245,13 @@ struct mtk_ddp_comp_match { }; static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, NULL }, + [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL }, - [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, NULL }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od }, [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL }, [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL }, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 6b13ba97094d..22a33ee451c4 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -21,6 +21,7 @@ struct device_node; struct drm_crtc; struct drm_device; struct mtk_plane_state; +struct drm_crtc_state; enum mtk_ddp_comp_type { MTK_DISP_OVL, @@ -64,7 +65,7 @@ struct mtk_ddp_comp; struct mtk_ddp_comp_funcs { void (*config)(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh); + unsigned int h, unsigned int vrefresh, unsigned int bpc); void (*start)(struct mtk_ddp_comp *comp); void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); @@ -73,6 +74,8 @@ struct mtk_ddp_comp_funcs { void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); + void (*gamma_set)(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state); }; struct mtk_ddp_comp { @@ -86,10 +89,10 @@ struct mtk_ddp_comp { static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, - unsigned int vrefresh) + unsigned int vrefresh, unsigned int bpc) { if (comp->funcs && comp->funcs->config) - comp->funcs->config(comp, w, h, vrefresh); + comp->funcs->config(comp, w, h, vrefresh, bpc); } static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp) @@ -139,6 +142,13 @@ static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, comp->funcs->layer_config(comp, idx, state); } +static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, + struct drm_crtc_state *state) +{ + if (comp->funcs && comp->funcs->gamma_set) + comp->funcs->gamma_set(comp, state); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type 
comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, @@ -146,5 +156,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, const struct mtk_ddp_comp_funcs *funcs); int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp); void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp); +void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, + unsigned int CFG); #endif /* MTK_DRM_DDP_COMP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index eebb7d881c2b..72c1ae4e02d4 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -61,10 +61,27 @@ static void mtk_atomic_complete(struct mtk_drm_private *private, mtk_atomic_wait_for_fences(state); + /* + * Mediatek drm supports runtime PM, so plane registers cannot be + * written when their crtc is disabled. + * + * The comment for drm_atomic_helper_commit states: + * For drivers supporting runtime PM the recommended sequence is + * + * drm_atomic_helper_commit_modeset_disables(dev, state); + * drm_atomic_helper_commit_modeset_enables(dev, state); + * drm_atomic_helper_commit_planes(dev, state, + * DRM_PLANE_COMMIT_ACTIVE_ONLY); + * + * See the kerneldoc entries for these three functions for more details. + */ drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_planes(drm, state, false); drm_atomic_helper_commit_modeset_enables(drm, state); + drm_atomic_helper_commit_planes(drm, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); + drm_atomic_helper_wait_for_vblanks(drm, state); + drm_atomic_helper_cleanup_planes(drm, state); drm_atomic_state_free(state); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 3995765a90dc..c461a232cbf5 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -30,57 +30,12 @@ static const u32 formats[] = { DRM_FORMAT_RGB565, }; -static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable, - dma_addr_t addr, struct drm_rect *dest) -{ - struct drm_plane *plane = &mtk_plane->base; - struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - unsigned int pitch, format; - int x, y; - - if (WARN_ON(!plane->state || (enable && !plane->state->fb))) - return; - - if (plane->state->fb) { - pitch = plane->state->fb->pitches[0]; - format = plane->state->fb->pixel_format; - } else { - pitch = 0; - format = DRM_FORMAT_RGBA8888; - } - - x = plane->state->crtc_x; - y = plane->state->crtc_y; - - if (x < 0) { - addr -= x * 4; - x = 0; - } - - if (y < 0) { - addr -= y * pitch; - y = 0; - } - - state->pending.enable = enable; - state->pending.pitch = pitch; - state->pending.format = format; - state->pending.addr = addr; - state->pending.x = x; - state->pending.y = y; - state->pending.width = dest->x2 - dest->x1; - state->pending.height = dest->y2 - dest->y1; - wmb(); /* Make sure the above parameters are set before update */ - state->pending.dirty = true; -} - static void mtk_plane_reset(struct drm_plane *plane) { struct mtk_plane_state *state; if (plane->state) { - if (plane->state->fb) - drm_framebuffer_unreference(plane->state->fb); + __drm_atomic_helper_plane_destroy_state(plane->state); state = to_mtk_plane_state(plane->state); memset(state, 0, sizeof(*state)); @@ -134,20 +89,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, { struct drm_framebuffer *fb = state->fb; struct drm_crtc_state 
*crtc_state; - bool visible; - struct drm_rect dest = { - .x1 = state->crtc_x, - .y1 = state->crtc_y, - .x2 = state->crtc_x + state->crtc_w, - .y2 = state->crtc_y + state->crtc_h, - }; - struct drm_rect src = { - /* 16.16 fixed point */ - .x1 = state->src_x, - .y1 = state->src_y, - .x2 = state->src_x + state->src_w, - .y2 = state->src_y + state->src_h, - }; struct drm_rect clip = { 0, }; if (!fb) @@ -168,40 +109,45 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, clip.x2 = crtc_state->mode.hdisplay; clip.y2 = crtc_state->mode.vdisplay; - return drm_plane_helper_check_update(plane, state->crtc, fb, - &src, &dest, &clip, - state->rotation, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true, &visible); + return drm_plane_helper_check_state(state, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); } static void mtk_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct mtk_plane_state *state = to_mtk_plane_state(plane->state); - struct drm_crtc *crtc = state->base.crtc; + struct drm_crtc *crtc = plane->state->crtc; + struct drm_framebuffer *fb = plane->state->fb; struct drm_gem_object *gem; struct mtk_drm_gem_obj *mtk_gem; - struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane); - struct drm_rect dest = { - .x1 = state->base.crtc_x, - .y1 = state->base.crtc_y, - .x2 = state->base.crtc_x + state->base.crtc_w, - .y2 = state->base.crtc_y + state->base.crtc_h, - }; - struct drm_rect clip = { 0, }; + unsigned int pitch, format; + dma_addr_t addr; - if (!crtc) + if (!crtc || WARN_ON(!fb)) return; - clip.x2 = state->base.crtc->state->mode.hdisplay; - clip.y2 = state->base.crtc->state->mode.vdisplay; - drm_rect_intersect(&dest, &clip); - - gem = mtk_fb_get_gem_obj(state->base.fb); + gem = mtk_fb_get_gem_obj(fb); mtk_gem = to_mtk_gem_obj(gem); - mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest); + addr = mtk_gem->dma_addr; + pitch = fb->pitches[0]; + format = fb->pixel_format; + + addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0); + addr += (plane->state->src.y1 >> 16) * pitch; + + state->pending.enable = true; + state->pending.pitch = pitch; + state->pending.format = format; + state->pending.addr = addr; + state->pending.x = plane->state->dst.x1; + state->pending.y = plane->state->dst.y1; + state->pending.width = drm_rect_width(&plane->state->dst); + state->pending.height = drm_rect_height(&plane->state->dst); + wmb(); /* Make sure the above parameters are set before update */ + state->pending.dirty = true; } static void mtk_plane_atomic_disable(struct drm_plane *plane, @@ -220,13 +166,12 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { .atomic_disable = mtk_plane_atomic_disable, }; -int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane, - unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int zpos) +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type) { int err; - err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs, + err = drm_universal_plane_init(dev, plane, possible_crtcs, &mtk_plane_funcs, formats, ARRAY_SIZE(formats), type, NULL); if (err) { @@ -234,8 +179,7 @@ int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane, return err; } - drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs); - mtk_plane->idx = zpos; + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); return 0; } diff 
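A standalone sketch of the scanout-address computation that replaces mtk_plane_enable() above: plane->state->src is in 16.16 fixed point, so shifting right by 16 gives pixel coordinates, and the horizontal offset is scaled by the bytes-per-pixel the driver obtains from drm_format_plane_cpp(). The 4-byte cpp and the base address below are illustrative values, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Compute the DMA start address for a clipped plane, as in
 * mtk_plane_atomic_update(): base + x * cpp + y * pitch. */
static uint64_t scanout_addr(uint64_t base, uint32_t src_x_1616,
                             uint32_t src_y_1616, uint32_t pitch, uint32_t cpp)
{
        uint64_t addr = base;

        addr += (uint64_t)(src_x_1616 >> 16) * cpp;   /* horizontal offset */
        addr += (uint64_t)(src_y_1616 >> 16) * pitch; /* vertical offset   */
        return addr;
}

int main(void)
{
        /* source rectangle starting at pixel (16, 8) of a 1920-wide XRGB8888 buffer */
        uint64_t addr = scanout_addr(0x40000000ULL, 16 << 16, 8 << 16, 1920 * 4, 4);

        printf("scanout address: 0x%llx\n", (unsigned long long)addr);
        return 0;
}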
--git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 72a7b3e4c126..6a20b49e0f2f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -18,11 +18,6 @@ #include <drm/drm_crtc.h> #include <linux/types.h> -struct mtk_drm_plane { - struct drm_plane base; - unsigned int idx; -}; - struct mtk_plane_pending_state { bool config; bool enable; @@ -41,19 +36,13 @@ struct mtk_plane_state { struct mtk_plane_pending_state pending; }; -static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane) -{ - return container_of(plane, struct mtk_drm_plane, base); -} - static inline struct mtk_plane_state * to_mtk_plane_state(struct drm_plane_state *state) { return container_of(state, struct mtk_plane_state, base); } -int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane, - unsigned long possible_crtcs, enum drm_plane_type type, - unsigned int zpos); +int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, + unsigned long possible_crtcs, enum drm_plane_type type); #endif diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 5e2f131a6a72..25b2a1a424e6 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -58,7 +58,7 @@ static const struct file_operations mga_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_PCI_DMA | + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_mga_buf_priv_t), .load = mga_driver_load, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 2b4b125eebc3..1443b3a34775 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -56,7 +56,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev) #ifdef CONFIG_X86 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(ap, "mgag200drmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary); kfree(ap); } diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index d9b04b008feb..88dd2214114d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -15,8 +15,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> -#include <linux/fb.h> - #include "mgag200_drv.h" static void mga_dirty_update(struct mga_fbdev *mfbdev, @@ -185,8 +183,10 @@ static int mgag200fb_create(struct drm_fb_helper *helper, } sysram = vmalloc(size); - if (!sysram) + if (!sysram) { + ret = -ENOMEM; goto err_sysram; + } info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 13798b3e6beb..e79cbc25ae3c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -135,7 +135,7 @@ static int mga_vram_init(struct mga_device *mdev) aper->ranges[0].base = mdev->mc.vram_base; aper->ranges[0].size = mdev->mc.vram_window; - remove_conflicting_framebuffers(aper, "mgafb", true); + drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true); kfree(aper); if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 
9f96dfe67769..7c9626d92019 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -99,7 +99,7 @@ static const struct drm_plane_funcs mdp4_plane_funcs = { }; static int mdp4_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); @@ -113,7 +113,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane, } static void mdp4_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); struct mdp4_kms *mdp4_kms = get_kms(plane); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 432c09836b0e..ba8f43278a44 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -78,7 +78,7 @@ static void mdp5_plane_install_rotation_property(struct drm_device *dev, if (!dev->mode_config.rotation_property) dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, - BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y)); + DRM_REFLECT_X | DRM_REFLECT_Y); if (dev->mode_config.rotation_property) drm_object_attach_property(&plane->base, @@ -250,7 +250,7 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { }; static int mdp5_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); @@ -264,7 +264,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane, } static void mdp5_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); @@ -309,8 +309,8 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - hflip = !!(state->rotation & BIT(DRM_REFLECT_X)); - vflip = !!(state->rotation & BIT(DRM_REFLECT_Y)); + hflip = !!(state->rotation & DRM_REFLECT_X); + vflip = !!(state->rotation & DRM_REFLECT_Y); if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { dev_err(plane->dev->dev, @@ -743,8 +743,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, config |= get_scale_config(format, src_h, crtc_h, false); DBG("scale config = %x", config); - hflip = !!(pstate->rotation & BIT(DRM_REFLECT_X)); - vflip = !!(pstate->rotation & BIT(DRM_REFLECT_Y)); + hflip = !!(pstate->rotation & DRM_REFLECT_X); + vflip = !!(pstate->rotation & DRM_REFLECT_Y); spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 4a8a6f1f1151..5df252cebf1c 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -118,7 +118,7 @@ static void complete_commit(struct msm_commit *c, bool async) drm_atomic_helper_commit_modeset_disables(dev, state); - drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 528bdeffb339..8ab9ce5089fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1151,7 +1151,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -1179,7 +1179,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; @@ -1297,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. */ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); out: if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 66c1280c0f1f..652ab111dd74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -351,7 +351,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif if (nouveau_modeset != 2) - remove_conflicting_framebuffers(aper, "nouveaufb", boot); + drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot); kfree(aper); ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug, diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index d1f248fd3506..9f5692726c16 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -32,7 +32,6 @@ #include <linux/tty.h> #include <linux/sysrq.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/init.h> #include <linux/screen_info.h> #include <linux/vga_switcheroo.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 0eae8afaed90..b1f3b818edf4 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -13,7 +13,6 @@ #include <linux/backlight.h> #include <linux/delay.h> -#include <linux/fb.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index fc4c238c9583..9f3d6f48f3e1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/spi/spi.h> -#include <linux/fb.h> #include <linux/gpio/consumer.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 157c512205d1..3557a4c7dd7b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -28,7 +28,6 @@ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/backlight.h> -#include <linux/fb.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_gpio.h> diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index e256d879b25c..dfd4e9621e3b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c 
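The mgag200 and nouveau hunks above (and the radeon one further below) all switch from remove_conflicting_framebuffers() to the drm_fb_helper_remove_conflicting_framebuffers() wrapper. A kernel-style sketch of that common "kick out the firmware framebuffer" pattern follows; the mydrv_* name is illustrative, not from the patch.

#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <drm/drm_fb_helper.h>

static int mydrv_kick_out_firmware_fb(struct pci_dev *pdev)
{
        struct apertures_struct *ap;
        bool primary = false;

        ap = alloc_apertures(1);
        if (!ap)
                return -ENOMEM;

        /* cover the VRAM BAR so conflicting fbdev drivers are unbound */
        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);

#ifdef CONFIG_X86
        primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
        drm_fb_helper_remove_conflicting_framebuffers(ap, "mydrvfb", primary);
        kfree(ap);

        return 0;
}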
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -125,16 +125,15 @@ u32 dss_of_port_get_port_number(struct device_node *port) static struct device_node *omapdss_of_get_remote_port(const struct device_node *node) { - struct device_node *np, *np_parent; + struct device_node *np; np = of_parse_phandle(node, "remote-endpoint", 0); if (!np) return NULL; - np_parent = of_get_next_parent(np); - of_node_put(np); + np = of_get_next_parent(np); - return np_parent; + return np; } struct device_node * diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 26c6134eb744..e1cfba51cff6 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -96,7 +96,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit) dispc_runtime_get(); drm_atomic_helper_commit_modeset_disables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, false); + drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); omap_atomic_wait_for_completion(dev, old_state); @@ -295,9 +295,9 @@ static int omap_modeset_init_properties(struct drm_device *dev) if (priv->has_dmm) { dev->mode_config.rotation_property = drm_mode_create_rotation_property(dev, - BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) | - BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) | - BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y)); + DRM_ROTATE_0 | DRM_ROTATE_90 | + DRM_ROTATE_180 | DRM_ROTATE_270 | + DRM_REFLECT_X | DRM_REFLECT_Y); if (!dev->mode_config.rotation_property) return -ENOMEM; } diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 31f5178c22c7..5f3337f1e9aa 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -179,24 +179,24 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, (uint32_t)win->rotation); /* fallthru to default to no rotation */ case 0: - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: orient = 0; break; - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: orient = MASK_XY_FLIP | MASK_X_INVERT; break; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: orient = MASK_X_INVERT | MASK_Y_INVERT; break; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: orient = MASK_XY_FLIP | MASK_Y_INVERT; break; } - if (win->rotation & BIT(DRM_REFLECT_X)) + if (win->rotation & DRM_REFLECT_X) orient ^= MASK_X_INVERT; - if (win->rotation & BIT(DRM_REFLECT_Y)) + if (win->rotation & DRM_REFLECT_Y) orient ^= MASK_Y_INVERT; /* adjust x,y offset for flip/invert: */ @@ -213,7 +213,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, } else { switch (win->rotation & DRM_ROTATE_MASK) { case 0: - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: /* OK */ break; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 5252ab720e70..66ac8c40db26 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -60,7 +60,7 @@ to_omap_plane_state(struct drm_plane_state *state) } static int omap_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { if (!new_state->fb) return 0; @@ -69,7 +69,7 @@ static int omap_plane_prepare_fb(struct drm_plane *plane, } static void omap_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { if (old_state->fb) omap_framebuffer_unpin(old_state->fb); @@ -109,8 +109,8 @@ static void omap_plane_atomic_update(struct drm_plane 
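The msm/mdp5 and omapdrm hunks above drop the BIT() wrappers because, from this series on, DRM_ROTATE_* and DRM_REFLECT_* are themselves bitmask values. A kernel-style sketch of how a driver now creates and tests the rotation property; the example_* names are illustrative and the header choice is approximate.

#include <drm/drm_crtc.h>

static int example_init_rotation(struct drm_device *dev, struct drm_plane *plane)
{
        struct drm_property *prop;

        /* the flags are ORed together directly, no BIT() needed any more */
        prop = drm_mode_create_rotation_property(dev,
                                                 DRM_ROTATE_0 | DRM_ROTATE_180 |
                                                 DRM_REFLECT_X | DRM_REFLECT_Y);
        if (!prop)
                return -ENOMEM;

        drm_object_attach_property(&plane->base, prop, DRM_ROTATE_0);
        return 0;
}

static bool example_hflip(const struct drm_plane_state *state)
{
        /* likewise, tests are plain mask tests */
        return !!(state->rotation & DRM_REFLECT_X);
}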
*plane, win.src_y = state->src_y >> 16; switch (state->rotation & DRM_ROTATE_MASK) { - case BIT(DRM_ROTATE_90): - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_90: + case DRM_ROTATE_270: win.src_w = state->src_h >> 16; win.src_h = state->src_w >> 16; break; @@ -149,7 +149,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane, struct omap_plane_state *omap_state = to_omap_plane_state(plane->state); struct omap_plane *omap_plane = to_omap_plane(plane); - plane->state->rotation = BIT(DRM_ROTATE_0); + plane->state->rotation = DRM_ROTATE_0; omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id; @@ -178,7 +178,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane, return -EINVAL; if (state->fb) { - if (state->rotation != BIT(DRM_ROTATE_0) && + if (state->rotation != DRM_ROTATE_0 && !omap_framebuffer_supports_rotation(state->fb)) return -EINVAL; } @@ -269,7 +269,7 @@ static void omap_plane_reset(struct drm_plane *plane) */ omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id; - omap_state->base.rotation = BIT(DRM_ROTATE_0); + omap_state->base.rotation = DRM_ROTATE_0; plane->state = &omap_state->base; plane->state->plane = plane; diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index df2657051afd..2cd879a4ae15 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -24,7 +24,6 @@ * David Airlie */ #include <linux/module.h> -#include <linux/fb.h> #include "drmP.h" #include "drm/drm.h" @@ -73,10 +72,12 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, } } +#ifdef CONFIG_DRM_FBDEV_EMULATION static struct fb_deferred_io qxl_defio = { .delay = QXL_DIRTY_DELAY, .deferred_io = drm_fb_helper_deferred_io, }; +#endif static struct fb_ops qxlfb_ops = { .owner = THIS_MODULE, @@ -313,8 +314,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, goto out_destroy_fbi; } +#ifdef CONFIG_DRM_FBDEV_EMULATION info->fbdefio = &qxl_defio; fb_deferred_io_init(info); +#endif qdev->fbdev_info = info; qdev->fbdev_qfb = &qfbdev->qfb; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index d50c9679e631..6a22de045cb5 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -361,8 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, qxl_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, + new_mem); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index c57b4de63caf..a982be57d1ef 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -56,7 +56,7 @@ static const struct file_operations r128_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .dev_priv_size = sizeof(drm_r128_buf_priv_t), .load = r128_driver_load, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a97abc8af657..4824f70b0258 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1154,6 +1154,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, u32 tmp, viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound 
*/ if (!atomic && !crtc->primary->fb) { @@ -1257,8 +1258,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -1433,8 +1435,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); @@ -1469,6 +1471,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, u32 viewport_w, viewport_h; int r; bool bypass_lut = false; + char *format_name; /* no fb bound */ if (!atomic && !crtc->primary->fb) { @@ -1558,8 +1561,9 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, bypass_lut = true; break; default: - DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format)); + format_name = drm_get_format_name(target_fb->pixel_format); + DRM_ERROR("Unsupported screen format %s\n", format_name); + kfree(format_name); return -EINVAL; } @@ -1632,8 +1636,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, (viewport_w << 16) | viewport_h); - /* set pageflip to happen only at start of vblank interval (front porch) */ - WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3); + /* set pageflip to happen anywhere in vblank interval */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { radeon_fb = to_radeon_framebuffer(fb); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index cead089a9e7d..432cb46f6a34 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -389,22 +389,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; u8 msg[DP_DPCD_SIZE]; - int ret, i; + int ret; - for (i = 0; i < 7; i++) { - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, - DP_DPCD_SIZE); - if (ret == DP_DPCD_SIZE) { - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, + DP_DPCD_SIZE); + if (ret == DP_DPCD_SIZE) { + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), - dig_connector->dpcd); + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), + dig_connector->dpcd); - radeon_dp_probe_oui(radeon_connector); + radeon_dp_probe_oui(radeon_connector); - return true; - } + return true; } + dig_connector->dpcd[0] = 0; return false; } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 0c1b9ff433af..b1784a1b482a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -1871,7 +1871,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 
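A short sketch of the drm_get_format_name() pattern the atombios_crtc hunks above adopt: at this point in the series the helper returns a kmalloc()'d string, so the caller logs it and must kfree() it afterwards. The example_* name and the header choices are illustrative.

#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

static void example_report_bad_format(u32 pixel_format)
{
        char *format_name = drm_get_format_name(pixel_format);

        DRM_ERROR("Unsupported screen format %s\n", format_name);
        kfree(format_name);     /* the returned string is heap-allocated */
}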
running, blackout = 0, tmp; + u32 running, tmp; u32 *io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1912,11 +1912,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1964,9 +1959,6 @@ int ci_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -8215,7 +8207,7 @@ static void cik_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index cead2284fd79..48db93577c1d 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2069,6 +2069,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_LMI_EXT40_ADDR 0xf498 #define UVD_GP_SCRATCH4 0xf4e0 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index db275b7ed34a..0b6b5766216f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2878,9 +2878,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); @@ -5580,7 +5579,7 @@ static void evergreen_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index c8e3d394cde7..f3d88ca2aa8f 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1523,6 +1523,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xef4c #define UVD_UDEC_DB_ADDR_CONFIG 0xef50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 +#define UVD_NO_OP 0xeffc #define UVD_RBC_RB_RPTR 0xf690 #define UVD_RBC_RB_WPTR 0xf694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 4a3d7cab83f7..103fc8650197 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2062,7 +2062,7 @@ static void cayman_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h 
index 47eb49b77d32..3c9fec88ea44 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -1137,6 +1137,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 9247e7d207fe..640653606f86 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3097,7 +3097,7 @@ static void r600_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 1e8495cca41e..2e00a5287bd2 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1490,6 +1490,7 @@ #define UVD_GPCOM_VCPU_DATA0 0xef10 #define UVD_GPCOM_VCPU_DATA1 0xef14 #define UVD_ENGINE_CNTL 0xef18 +#define UVD_NO_OP 0xeffc #define UVD_SEMA_CNTL 0xf400 #define UVD_RB_ARB_CTRL 0xf480 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5633ee3eb46e..1b0dcad916b0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -742,6 +742,7 @@ struct radeon_flip_work { struct work_struct unpin_work; struct radeon_device *rdev; int crtc_id; + u32 target_vblank; uint64_t base; struct drm_pending_vblank_event *event; struct radeon_bo *old_rbo; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c3206fb8f4cf..890171f08987 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -400,14 +400,13 @@ static void radeon_flip_work_func(struct work_struct *__work) struct radeon_flip_work *work = container_of(__work, struct radeon_flip_work, flip_work); struct radeon_device *rdev = work->rdev; + struct drm_device *dev = rdev->ddev; struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; struct drm_crtc *crtc = &radeon_crtc->base; unsigned long flags; int r; - int vpos, hpos, stat, min_udelay = 0; - unsigned repcnt = 4; - struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; + int vpos, hpos; down_read(&rdev->exclusive_lock); if (work->fence) { @@ -438,59 +437,25 @@ static void radeon_flip_work_func(struct work_struct *__work) work->fence = NULL; } + /* Wait until we're out of the vertical blank period before the one + * targeted by the flip + */ + while (radeon_crtc->enabled && + (radeon_get_crtc_scanoutpos(dev, work->crtc_id, 0, + &vpos, &hpos, NULL, NULL, + &crtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(work->target_vblank - + dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0) + usleep_range(1000, 2000); + /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); /* set the proper interrupt */ radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); - /* If this happens to execute within the "virtually extended" vblank - * interval before the start of the real vblank interval then it needs - * to delay programming the mmio flip until the real vblank is entered. 
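A standalone sketch of the wrap-safe vblank comparison the new radeon_flip_work_func() wait loop above relies on (and that radeon_crtc_page_flip_target() further below feeds via work->target_vblank): both counters are 32-bit and may wrap, so "target not reached yet" is tested with a signed difference rather than a plain '<'.

#include <stdint.h>
#include <stdio.h>

/* true while the flip must keep waiting for the target vblank */
static int flip_is_early(uint32_t target_vblank, uint32_t current_vblank)
{
        return (int32_t)(target_vblank - current_vblank) > 0;
}

int main(void)
{
        printf("%d\n", flip_is_early(5, 3));                    /* 1: keep waiting */
        printf("%d\n", flip_is_early(3, 5));                    /* 0: flip now     */
        /* still correct across the 32-bit wrap-around */
        printf("%d\n", flip_is_early(0x00000001, 0xfffffffe));  /* 1: keep waiting */
        return 0;
}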
- * This prevents completing a flip too early due to the way we fudge - * our vblank counter and vblank timestamps in order to work around the - * problem that the hw fires vblank interrupts before actual start of - * vblank (when line buffer refilling is done for a frame). It - * complements the fudging logic in radeon_get_crtc_scanoutpos() for - * timestamping and radeon_get_vblank_counter_kms() for vblank counts. - * - * In practice this won't execute very often unless on very fast - * machines because the time window for this to happen is very small. - */ - while (radeon_crtc->enabled && --repcnt) { - /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank - * start in hpos, and to the "fudged earlier" vblank start in - * vpos. - */ - stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, - GET_DISTANCE_TO_VBLANKSTART, - &vpos, &hpos, NULL, NULL, - &crtc->hwmode); - - if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || - !(vpos >= 0 && hpos <= 0)) - break; - - /* Sleep at least until estimated real start of hw vblank */ - min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); - if (min_udelay > vblank->framedur_ns / 2000) { - /* Don't wait ridiculously long - something is wrong */ - repcnt = 0; - break; - } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - usleep_range(min_udelay, 2 * min_udelay); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; - - if (!repcnt) - DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " - "framedur %d, linedur %d, stat %d, vpos %d, " - "hpos %d\n", work->crtc_id, min_udelay, - vblank->framedur_ns / 1000, - vblank->linedur_ns / 1000, stat, vpos, hpos); - /* do the flip (mmio) */ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async); @@ -499,10 +464,11 @@ static void radeon_flip_work_func(struct work_struct *__work) up_read(&rdev->exclusive_lock); } -static int radeon_crtc_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) +static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + uint32_t target) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -599,12 +565,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, base &= ~7; } work->base = base; - - r = drm_crtc_vblank_get(crtc); - if (r) { - DRM_ERROR("failed to get vblank before flip\n"); - goto pflip_cleanup; - } + work->target_vblank = target - drm_crtc_vblank_count(crtc) + + dev->driver->get_vblank_counter(dev, work->crtc_id); /* We borrow the event spin lock for protecting flip_work */ spin_lock_irqsave(&crtc->dev->event_lock, flags); @@ -613,7 +575,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); r = -EBUSY; - goto vblank_cleanup; + goto pflip_cleanup; } radeon_crtc->flip_status = RADEON_FLIP_PENDING; radeon_crtc->flip_work = work; @@ -626,9 +588,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, queue_work(radeon_crtc->flip_queue, &work->flip_work); return 0; -vblank_cleanup: - drm_crtc_vblank_put(crtc); - pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { DRM_ERROR("failed to reserve new rbo in error path\n"); @@ -697,7 +656,7 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = { .gamma_set = 
radeon_crtc_gamma_set, .set_config = radeon_crtc_set_config, .destroy = radeon_crtc_destroy, - .page_flip = radeon_crtc_page_flip, + .page_flip_target = radeon_crtc_page_flip_target, }; static void radeon_crtc_init(struct drm_device *dev, int index) diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index db64e0062689..2d465648856a 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -164,7 +164,6 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg } if (tmp & AUX_SW_RX_TIMEOUT) { - DRM_DEBUG_KMS("dp_aux_ch timed out\n"); ret = -ETIMEDOUT; goto done; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c01a7c6abb49..07e44931f1f1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -39,6 +39,7 @@ #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> #include <drm/drm_gem.h> +#include <drm/drm_fb_helper.h> #include "drm_crtc_helper.h" #include "radeon_kfd.h" @@ -94,9 +95,10 @@ * 2.44.0 - SET_APPEND_CNT packet3 support * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI * 2.46.0 - Add PFP_SYNC_ME support on evergreen + * 2.47.0 - Add UVD_NO_OP register support */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 46 +#define KMS_DRIVER_MINOR 47 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -324,7 +326,7 @@ static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) #ifdef CONFIG_X86 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - remove_conflicting_framebuffers(ap, "radeondrmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary); kfree(ap); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 0e3143acb565..568e036d547e 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -25,7 +25,6 @@ */ #include <linux/module.h> #include <linux/slab.h> -#include <linux/fb.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> @@ -383,7 +382,7 @@ void radeon_fbdev_fini(struct radeon_device *rdev) void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) { if (rdev->mode_info.rfbdev) - fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); + drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state); } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 9590bcd321c0..021aa005623f 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -938,10 +938,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else if (rec->hw_capable && radeon_hw_i2c && ASIC_IS_DCE3(rdev)) { @@ -950,10 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_atom_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); - if (ret) { - DRM_ERROR("Failed to register hw i2c %s\n", name); + if (ret) goto out_free; - } } else { /* set the radeon bit adapter */ 
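The 2.47.0 bump above advertises the new UVD_NO_OP register, and the radeon_uvd.c hunk further below uses it to pad the message IB. A kernel-style sketch of that padding: a type-0 packet writes one payload dword to UVD_NO_OP, so each NOP occupies two dwords (header plus payload) and the loop steps by 2, replacing the old single-dword PACKET2(0) fill. PACKET0(), UVD_NO_OP and struct radeon_ib come from the radeon headers; the function name is illustrative.

/* assumes "radeon.h" plus the ASIC register header providing PACKET0()/UVD_NO_OP */
static void example_pad_uvd_ib(struct radeon_ib *ib)
{
        int i;

        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(UVD_NO_OP, 0);     /* write one dword...  */
                ib->ptr[i + 1] = 0;                     /* ...with value zero  */
        }
        ib->length_dw = 16;
}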
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index ffdad81ef964..93414aca60d6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -346,7 +346,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; @@ -379,7 +379,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); + r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } @@ -444,8 +444,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 73dfe01435ea..0cd0e7bdee55 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -669,6 +669,7 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, return r; break; case UVD_ENGINE_CNTL: + case UVD_NO_OP: break; default: DRM_ERROR("Invalid reg 0x%X!\n", @@ -753,8 +754,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev, ib.ptr[3] = addr >> 32; ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0); ib.ptr[5] = 0; - for (i = 6; i < 16; ++i) - ib.ptr[i] = PACKET2(0); + for (i = 6; i < 16; i += 2) { + ib.ptr[i] = PACKET0(UVD_NO_OP, 0); + ib.ptr[i+1] = 0; + } ib.length_dw = 16; r = radeon_ib_schedule(rdev, &ib, NULL, false); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index c55d653aaf5f..76c55c5d11ec 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -406,9 +406,8 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) for (i = 0; i < rdev->num_crtc; i++) { if (save->crtc_enabled[i]) { tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); - if ((tmp & 0x7) != 3) { + if ((tmp & 0x7) != 0) { tmp &= ~0x7; - tmp |= 0x3; WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); } tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 1c120a4c3c97..729ae588c970 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1738,7 +1738,7 @@ static void rv770_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9ef2064b1c9c..0271f4c559ae 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -387,6 +387,7 @@ #define UVD_UDEC_TILING_CONFIG 0xef40 #define UVD_UDEC_DB_TILING_CONFIG 0xef44 #define UVD_UDEC_DBW_TILING_CONFIG 0xef48 +#define UVD_NO_OP 0xeffc #define GC_USER_SHADER_PIPE_CONFIG 0x8954 #define INACTIVE_QD_PIPES(x) ((x) << 8) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c 
index 2523ca96c6c7..7ee9aafbdf74 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1547,7 +1547,7 @@ int si_mc_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data = NULL; const __le32 *new_fw_data = NULL; - u32 running, blackout = 0; + u32 running; u32 *io_mc_regs = NULL; const __le32 *new_io_mc_regs = NULL; int i, regs_size, ucode_size; @@ -1598,11 +1598,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; if (running == 0) { - if (running) { - blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); - } - /* reset the engine and set to writable */ WREG32(MC_SEQ_SUP_CNTL, 0x00000008); WREG32(MC_SEQ_SUP_CNTL, 0x00000010); @@ -1641,9 +1636,6 @@ int si_mc_load_microcode(struct radeon_device *rdev) break; udelay(1); } - - if (running) - WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); } return 0; @@ -6928,7 +6920,7 @@ static void si_uvd_resume(struct radeon_device *rdev) return; ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2); + r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0)); if (r) { dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r); return; diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index d1a7b58dd291..eb220eecba78 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1559,6 +1559,7 @@ #define UVD_UDEC_ADDR_CONFIG 0xEF4C #define UVD_UDEC_DB_ADDR_CONFIG 0xEF50 #define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 +#define UVD_NO_OP 0xEFFC #define UVD_RBC_RB_RPTR 0xF690 #define UVD_RBC_RB_WPTR 0xF694 #define UVD_STATUS 0xf6bc diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index 4de3ff0dbebd..e03004f4588d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -125,6 +125,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, /* Link drm_bridge to encoder */ bridge->encoder = encoder; + encoder->bridge = bridge; ret = drm_bridge_attach(rcdu->ddev, bridge); if (ret) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index f03eb55318c1..bd9c3bb9252c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -257,7 +257,8 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit) /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); drm_atomic_helper_commit_modeset_enables(dev, old_state); - drm_atomic_helper_commit_planes(dev, old_state, true); + drm_atomic_helper_commit_planes(dev, old_state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_wait_for_vblanks(dev, old_state); diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 05d07138a2b2..9746365694ba 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -3,7 +3,7 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
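Several drivers in this series (mediatek, rcar-du, rockchip) converge on the same atomic commit-tail ordering: modeset disables, then enables, then planes with DRM_PLANE_COMMIT_ACTIVE_ONLY so plane hooks are skipped for CRTCs that are powered down under runtime PM. A kernel-style sketch of that sequence, with an illustrative function name:

#include <drm/drm_atomic_helper.h>

static void example_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;

        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_modeset_enables(dev, state);
        drm_atomic_helper_commit_planes(dev, state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);

        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
}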
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ - rockchip_drm_gem.o rockchip_drm_vop.o + rockchip_drm_gem.o rockchip_drm_psr.o rockchip_drm_vop.o rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 89aadbf465f8..e83be157cc2a 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -32,6 +32,7 @@ #include <drm/bridge/analogix_dp.h> #include "rockchip_drm_drv.h" +#include "rockchip_drm_psr.h" #include "rockchip_drm_vop.h" #define RK3288_GRF_SOC_CON6 0x25c @@ -41,6 +42,8 @@ #define HIWORD_UPDATE(val, mask) (val | (mask) << 16) +#define PSR_WAIT_LINE_FLAG_TIMEOUT_MS 100 + #define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm) /** @@ -68,11 +71,62 @@ struct rockchip_dp_device { struct regmap *grf; struct reset_control *rst; + struct work_struct psr_work; + spinlock_t psr_lock; + unsigned int psr_state; + const struct rockchip_dp_chip_data *data; struct analogix_dp_plat_data plat_data; }; +static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled) +{ + struct rockchip_dp_device *dp = to_dp(encoder); + unsigned long flags; + + dev_dbg(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit"); + + spin_lock_irqsave(&dp->psr_lock, flags); + if (enabled) + dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE; + else + dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; + + schedule_work(&dp->psr_work); + spin_unlock_irqrestore(&dp->psr_lock, flags); +} + +static void analogix_dp_psr_work(struct work_struct *work) +{ + struct rockchip_dp_device *dp = + container_of(work, typeof(*dp), psr_work); + struct drm_crtc *crtc = dp->encoder.crtc; + int psr_state = dp->psr_state; + int vact_end; + int ret; + unsigned long flags; + + if (!crtc) + return; + + vact_end = crtc->mode.vtotal - crtc->mode.vsync_start + crtc->mode.vdisplay; + + ret = rockchip_drm_wait_line_flag(dp->encoder.crtc, vact_end, + PSR_WAIT_LINE_FLAG_TIMEOUT_MS); + if (ret) { + dev_err(dp->dev, "line flag interrupt did not arrive\n"); + return; + } + + spin_lock_irqsave(&dp->psr_lock, flags); + if (psr_state == EDP_VSC_PSR_STATE_ACTIVE) + analogix_dp_enable_psr(dp->dev); + else + analogix_dp_disable_psr(dp->dev); + spin_unlock_irqrestore(&dp->psr_lock, flags); +} + static int rockchip_dp_pre_init(struct rockchip_dp_device *dp) { reset_control_assert(dp->rst); @@ -87,6 +141,8 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data) struct rockchip_dp_device *dp = to_dp(plat_data); int ret; + cancel_work_sync(&dp->psr_work); + ret = clk_prepare_enable(dp->pclk); if (ret < 0) { dev_err(dp->dev, "failed to enable pclk %d\n", ret); @@ -342,12 +398,22 @@ static int rockchip_dp_bind(struct device *dev, struct device *master, dp->plat_data.power_off = rockchip_dp_powerdown; dp->plat_data.get_modes = rockchip_dp_get_modes; + spin_lock_init(&dp->psr_lock); + dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE; + INIT_WORK(&dp->psr_work, analogix_dp_psr_work); + + rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set); + return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); } static void rockchip_dp_unbind(struct device *dev, struct device *master, void *data) { + struct rockchip_dp_device *dp = dev_get_drvdata(dev); + + rockchip_drm_psr_unregister(&dp->encoder); + return analogix_dp_unbind(dev, master, data); } @@ -381,10 +447,8 @@ static int rockchip_dp_probe(struct 
platform_device *pdev) panel = of_drm_find_panel(panel_node); of_node_put(panel_node); - if (!panel) { - DRM_ERROR("failed to find panel\n"); + if (!panel) return -EPROBE_DEFER; - } } dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); @@ -445,7 +509,6 @@ static struct platform_driver rockchip_dp_driver = { .remove = rockchip_dp_remove, .driver = { .name = "rockchip-dp", - .owner = THIS_MODULE, .pm = &rockchip_dp_pm_ops, .of_match_table = of_match_ptr(rockchip_dp_dt_ids), }, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a822d49a255a..76eaf1de52e4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -156,6 +156,9 @@ static int rockchip_drm_bind(struct device *dev) drm_dev->dev_private = private; + INIT_LIST_HEAD(&private->psr_list); + spin_lock_init(&private->psr_list_lock); + drm_mode_config_init(drm_dev); rockchip_drm_mode_config_init(drm_dev); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index ea3932940061..5c698456aa1c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -61,6 +61,9 @@ struct rockchip_drm_private { struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; struct drm_atomic_state *state; + + struct list_head psr_list; + spinlock_t psr_list_lock; }; int rockchip_register_crtc_funcs(struct drm_crtc *crtc, @@ -70,4 +73,7 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, struct device *dev); void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, struct device *dev); +int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num, + unsigned int mstimeout); + #endif /* _ROCKCHIP_DRM_DRV_H_ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 55c52734c52d..60bcc48f84b9 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -22,6 +22,7 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" #include "rockchip_drm_gem.h" +#include "rockchip_drm_psr.h" #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) @@ -63,9 +64,20 @@ static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb, rockchip_fb->obj[0], handle); } +static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + rockchip_drm_psr_flush(fb->dev); + return 0; +} + static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = { .destroy = rockchip_drm_fb_destroy, .create_handle = rockchip_drm_fb_create_handle, + .dirty = rockchip_drm_fb_dirty, }; static struct rockchip_drm_fb * @@ -233,7 +245,8 @@ rockchip_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_enables(dev, state); - drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_planes(dev, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_hw_done(state); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c new file mode 100644 index 000000000000..c6ac5d01c8e4 --- /dev/null +++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c @@ -0,0 +1,245 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Yakir Yang <ykk@rock-chips.com> + * + * This software is licensed 
under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_psr.h" + +#define PSR_FLUSH_TIMEOUT msecs_to_jiffies(3000) /* 3 seconds */ + +enum psr_state { + PSR_FLUSH, + PSR_ENABLE, + PSR_DISABLE, +}; + +struct psr_drv { + struct list_head list; + struct drm_encoder *encoder; + + spinlock_t lock; + enum psr_state state; + + struct timer_list flush_timer; + + void (*set)(struct drm_encoder *encoder, bool enable); +}; + +static struct psr_drv *find_psr_by_crtc(struct drm_crtc *crtc) +{ + struct rockchip_drm_private *drm_drv = crtc->dev->dev_private; + struct psr_drv *psr; + unsigned long flags; + + spin_lock_irqsave(&drm_drv->psr_list_lock, flags); + list_for_each_entry(psr, &drm_drv->psr_list, list) { + if (psr->encoder->crtc == crtc) + goto out; + } + psr = ERR_PTR(-ENODEV); + +out: + spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags); + return psr; +} + +static void psr_set_state_locked(struct psr_drv *psr, enum psr_state state) +{ + /* + * Allowed finite state machine: + * + * PSR_ENABLE < = = = = = > PSR_FLUSH + * | ^ | + * | | | + * v | | + * PSR_DISABLE < - - - - - - - - - + */ + if (state == psr->state) + return; + + /* Requesting a flush when disabled is a noop */ + if (state == PSR_FLUSH && psr->state == PSR_DISABLE) + return; + + psr->state = state; + + /* Already disabled in flush, change the state, but not the hardware */ + if (state == PSR_DISABLE && psr->state == PSR_FLUSH) + return; + + /* Actually commit the state change to hardware */ + switch (psr->state) { + case PSR_ENABLE: + psr->set(psr->encoder, true); + break; + + case PSR_DISABLE: + case PSR_FLUSH: + psr->set(psr->encoder, false); + break; + } +} + +static void psr_set_state(struct psr_drv *psr, enum psr_state state) +{ + unsigned long flags; + + spin_lock_irqsave(&psr->lock, flags); + psr_set_state_locked(psr, state); + spin_unlock_irqrestore(&psr->lock, flags); +} + +static void psr_flush_handler(unsigned long data) +{ + struct psr_drv *psr = (struct psr_drv *)data; + unsigned long flags; + + /* If the state has changed since we initiated the flush, do nothing */ + spin_lock_irqsave(&psr->lock, flags); + if (psr->state == PSR_FLUSH) + psr_set_state_locked(psr, PSR_ENABLE); + spin_unlock_irqrestore(&psr->lock, flags); +} + +/** + * rockchip_drm_psr_enable - enable the encoder PSR which bind to given CRTC + * @crtc: CRTC to obtain the PSR encoder + * + * Returns: + * Zero on success, negative errno on failure. + */ +int rockchip_drm_psr_enable(struct drm_crtc *crtc) +{ + struct psr_drv *psr = find_psr_by_crtc(crtc); + + if (IS_ERR(psr)) + return PTR_ERR(psr); + + psr_set_state(psr, PSR_ENABLE); + return 0; +} +EXPORT_SYMBOL(rockchip_drm_psr_enable); + +/** + * rockchip_drm_psr_disable - disable the encoder PSR which bind to given CRTC + * @crtc: CRTC to obtain the PSR encoder + * + * Returns: + * Zero on success, negative errno on failure. 
+ */
+int rockchip_drm_psr_disable(struct drm_crtc *crtc)
+{
+	struct psr_drv *psr = find_psr_by_crtc(crtc);
+
+	if (IS_ERR(psr))
+		return PTR_ERR(psr);
+
+	psr_set_state(psr, PSR_DISABLE);
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_disable);
+
+/**
+ * rockchip_drm_psr_flush - flush all registered PSR encoders
+ * @dev: drm device
+ *
+ * Disable the PSR function for all registered encoders, then re-enable it
+ * after PSR_FLUSH_TIMEOUT. If an encoder's PSR state is changed while the
+ * flush is pending, that new state is kept once the timeout expires.
+ */
+void rockchip_drm_psr_flush(struct drm_device *dev)
+{
+	struct rockchip_drm_private *drm_drv = dev->dev_private;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry(psr, &drm_drv->psr_list, list) {
+		mod_timer(&psr->flush_timer,
+			  round_jiffies_up(jiffies + PSR_FLUSH_TIMEOUT));
+
+		psr_set_state(psr, PSR_FLUSH);
+	}
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_flush);
+
+/**
+ * rockchip_drm_psr_register - register an encoder with the PSR driver
+ * @encoder: encoder that supports the PSR function
+ * @psr_set: callback to set the PSR state
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int rockchip_drm_psr_register(struct drm_encoder *encoder,
+			void (*psr_set)(struct drm_encoder *, bool enable))
+{
+	struct rockchip_drm_private *drm_drv;
+	struct psr_drv *psr;
+	unsigned long flags;
+
+	if (!encoder || !psr_set)
+		return -EINVAL;
+
+	drm_drv = encoder->dev->dev_private;
+
+	psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
+	if (!psr)
+		return -ENOMEM;
+
+	setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+	spin_lock_init(&psr->lock);
+
+	psr->state = PSR_DISABLE;
+	psr->encoder = encoder;
+	psr->set = psr_set;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_add_tail(&psr->list, &drm_drv->psr_list);
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(rockchip_drm_psr_register);
+
+/**
+ * rockchip_drm_psr_unregister - unregister an encoder from the PSR driver
+ * @encoder: encoder that supports the PSR function
+ */
+void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
+{
+	struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+	struct psr_drv *psr, *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drm_drv->psr_list_lock, flags);
+	list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
+		if (psr->encoder == encoder) {
+			del_timer(&psr->flush_timer);
+			list_del(&psr->list);
+			kfree(psr);
+		}
+	}
+	spin_unlock_irqrestore(&drm_drv->psr_list_lock, flags);
+}
+EXPORT_SYMBOL(rockchip_drm_psr_unregister);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
new file mode 100644
index 000000000000..c35b68873a30
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ROCKCHIP_DRM_PSR___ +#define __ROCKCHIP_DRM_PSR___ + +void rockchip_drm_psr_flush(struct drm_device *dev); +int rockchip_drm_psr_enable(struct drm_crtc *crtc); +int rockchip_drm_psr_disable(struct drm_crtc *crtc); + +int rockchip_drm_psr_register(struct drm_encoder *encoder, + void (*psr_set)(struct drm_encoder *, bool enable)); +void rockchip_drm_psr_unregister(struct drm_encoder *encoder); + +#endif /* __ROCKCHIP_DRM_PSR__ */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 91305eb7d312..d486049f9722 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -34,17 +34,21 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" #include "rockchip_drm_fb.h" +#include "rockchip_drm_psr.h" #include "rockchip_drm_vop.h" -#define __REG_SET_RELAXED(x, off, mask, shift, v) \ - vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift) -#define __REG_SET_NORMAL(x, off, mask, shift, v) \ - vop_mask_write(x, off, (mask) << shift, (v) << shift) +#define __REG_SET_RELAXED(x, off, mask, shift, v, write_mask) \ + vop_mask_write(x, off, mask, shift, v, write_mask, true) + +#define __REG_SET_NORMAL(x, off, mask, shift, v, write_mask) \ + vop_mask_write(x, off, mask, shift, v, write_mask, false) #define REG_SET(x, base, reg, v, mode) \ - __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) + __REG_SET_##mode(x, base + reg.offset, \ + reg.mask, reg.shift, v, reg.write_mask) #define REG_SET_MASK(x, base, reg, mask, v, mode) \ - __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v) + __REG_SET_##mode(x, base + reg.offset, \ + mask, reg.shift, v, reg.write_mask) #define VOP_WIN_SET(x, win, name, v) \ REG_SET(x, win->base, win->phy->name, v, RELAXED) @@ -87,8 +91,6 @@ struct vop_plane_state { struct drm_plane_state base; int format; - struct drm_rect src; - struct drm_rect dest; dma_addr_t yrgb_mst; bool enable; }; @@ -108,6 +110,7 @@ struct vop { struct device *dev; struct drm_device *drm_dev; bool is_enabled; + bool vblank_active; /* mutex vsync_ work */ struct mutex vsync_mutex; @@ -118,6 +121,8 @@ struct vop { /* protected by dev->event_lock */ struct drm_pending_vblank_event *event; + struct completion line_flag_completion; + const struct vop_data *data; uint32_t *regsbak; @@ -164,27 +169,25 @@ static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base, } static inline void vop_mask_write(struct vop *vop, uint32_t offset, - uint32_t mask, uint32_t v) + uint32_t mask, uint32_t shift, uint32_t v, + bool write_mask, bool relaxed) { - if (mask) { - uint32_t cached_val = vop->regsbak[offset >> 2]; - - cached_val = (cached_val & ~mask) | v; - writel(cached_val, vop->regs + offset); - vop->regsbak[offset >> 2] = cached_val; - } -} + if (!mask) + return; -static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset, - uint32_t mask, uint32_t v) -{ - if (mask) { + if (write_mask) { + v = ((v << shift) & 0xffff) | (mask << (shift + 16)); + } else { uint32_t cached_val = vop->regsbak[offset >> 2]; - cached_val = (cached_val & ~mask) | v; - writel_relaxed(cached_val, vop->regs + offset); - vop->regsbak[offset >> 2] = cached_val; + v = (cached_val & ~(mask << shift)) | ((v & mask) << shift); + 
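+		/*
+		 * Keep the shadow copy in regsbak in sync so the next
+		 * read-modify-write of this register starts from the
+		 * value programmed here.
+		 */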
vop->regsbak[offset >> 2] = v; } + + if (relaxed) + writel_relaxed(v, vop->regs + offset); + else + writel(v, vop->regs + offset); } static inline uint32_t vop_get_intr_type(struct vop *vop, @@ -240,7 +243,7 @@ static enum vop_data_format vop_convert_format(uint32_t format) case DRM_FORMAT_NV24: return VOP_FMT_YUV444SP; default: - DRM_ERROR("unsupport format[%08x]\n", format); + DRM_ERROR("unsupported format[%08x]\n", format); return -EINVAL; } } @@ -317,7 +320,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, int vskiplines = 0; if (dst_w > 3840) { - DRM_ERROR("Maximum destination width (3840) exceeded\n"); + DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n"); return; } @@ -355,11 +358,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win, VOP_SCL_SET_EXT(vop, win, lb_mode, lb_mode); if (lb_mode == LB_RGB_3840X2) { if (yrgb_ver_scl_mode != SCALE_NONE) { - DRM_ERROR("ERROR : not allow yrgb ver scale\n"); + DRM_DEV_ERROR(vop->dev, "not allow yrgb ver scale\n"); return; } if (cbcr_ver_scl_mode != SCALE_NONE) { - DRM_ERROR("ERROR : not allow cbcr ver scale\n"); + DRM_DEV_ERROR(vop->dev, "not allow cbcr ver scale\n"); return; } vsu_mode = SCALE_UP_BIL; @@ -430,7 +433,72 @@ static void vop_dsp_hold_valid_irq_disable(struct vop *vop) spin_unlock_irqrestore(&vop->irq_lock, flags); } -static void vop_enable(struct drm_crtc *crtc) +/* + * (1) each frame starts at the start of the Vsync pulse which is signaled by + * the "FRAME_SYNC" interrupt. + * (2) the active data region of each frame ends at dsp_vact_end + * (3) we should program this same number (dsp_vact_end) into dsp_line_frag_num, + * to get "LINE_FLAG" interrupt at the end of the active on screen data. + * + * VOP_INTR_CTRL0.dsp_line_frag_num = VOP_DSP_VACT_ST_END.dsp_vact_end + * Interrupts + * LINE_FLAG -------------------------------+ + * FRAME_SYNC ----+ | + * | | + * v v + * | Vsync | Vbp | Vactive | Vfp | + * ^ ^ ^ ^ + * | | | | + * | | | | + * dsp_vs_end ------------+ | | | VOP_DSP_VTOTAL_VS_END + * dsp_vact_start --------------+ | | VOP_DSP_VACT_ST_END + * dsp_vact_end ----------------------------+ | VOP_DSP_VACT_ST_END + * dsp_total -------------------------------------+ VOP_DSP_VTOTAL_VS_END + */ +static bool vop_line_flag_irq_is_enabled(struct vop *vop) +{ + uint32_t line_flag_irq; + unsigned long flags; + + spin_lock_irqsave(&vop->irq_lock, flags); + + line_flag_irq = VOP_INTR_GET_TYPE(vop, enable, LINE_FLAG_INTR); + + spin_unlock_irqrestore(&vop->irq_lock, flags); + + return !!line_flag_irq; +} + +static void vop_line_flag_irq_enable(struct vop *vop, int line_num) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + VOP_CTRL_SET(vop, line_flag_num[0], line_num); + VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 1); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + +static void vop_line_flag_irq_disable(struct vop *vop) +{ + unsigned long flags; + + if (WARN_ON(!vop->is_enabled)) + return; + + spin_lock_irqsave(&vop->irq_lock, flags); + + VOP_INTR_SET_TYPE(vop, enable, LINE_FLAG_INTR, 0); + + spin_unlock_irqrestore(&vop->irq_lock, flags); +} + +static int vop_enable(struct drm_crtc *crtc) { struct vop *vop = to_vop(crtc); int ret; @@ -438,26 +506,20 @@ static void vop_enable(struct drm_crtc *crtc) ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); - return; + goto err_put_pm_runtime; } ret = clk_enable(vop->hclk); - if 
(ret < 0) { - dev_err(vop->dev, "failed to enable hclk - %d\n", ret); - return; - } + if (WARN_ON(ret < 0)) + goto err_put_pm_runtime; ret = clk_enable(vop->dclk); - if (ret < 0) { - dev_err(vop->dev, "failed to enable dclk - %d\n", ret); + if (WARN_ON(ret < 0)) goto err_disable_hclk; - } ret = clk_enable(vop->aclk); - if (ret < 0) { - dev_err(vop->dev, "failed to enable aclk - %d\n", ret); + if (WARN_ON(ret < 0)) goto err_disable_dclk; - } /* * Slave iommu shares power, irq and clock with vop. It was associated @@ -487,7 +549,7 @@ static void vop_enable(struct drm_crtc *crtc) drm_crtc_vblank_on(crtc); - return; + return 0; err_disable_aclk: clk_disable(vop->aclk); @@ -495,6 +557,9 @@ err_disable_dclk: clk_disable(vop->dclk); err_disable_hclk: clk_disable(vop->hclk); +err_put_pm_runtime: + pm_runtime_put_sync(vop->dev); + return ret; } static void vop_crtc_disable(struct drm_crtc *crtc) @@ -569,7 +634,7 @@ static void vop_plane_destroy(struct drm_plane *plane) } static int vop_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { if (plane->state->fb) drm_framebuffer_reference(plane->state->fb); @@ -578,7 +643,7 @@ static int vop_plane_prepare_fb(struct drm_plane *plane, } static void vop_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { if (old_state->fb) drm_framebuffer_unreference(old_state->fb); @@ -593,10 +658,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, struct vop_win *vop_win = to_vop_win(plane); struct vop_plane_state *vop_plane_state = to_vop_plane_state(state); const struct vop_win_data *win = vop_win->data; - bool visible; int ret; - struct drm_rect *dest = &vop_plane_state->dest; - struct drm_rect *src = &vop_plane_state->src; struct drm_rect clip; int min_scale = win->phy->scl ? FRAC_16_16(1, 8) : DRM_PLANE_HELPER_NO_SCALING; @@ -610,30 +672,18 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!crtc_state)) return -EINVAL; - src->x1 = state->src_x; - src->y1 = state->src_y; - src->x2 = state->src_x + state->src_w; - src->y2 = state->src_y + state->src_h; - dest->x1 = state->crtc_x; - dest->y1 = state->crtc_y; - dest->x2 = state->crtc_x + state->crtc_w; - dest->y2 = state->crtc_y + state->crtc_h; - clip.x1 = 0; clip.y1 = 0; clip.x2 = crtc_state->adjusted_mode.hdisplay; clip.y2 = crtc_state->adjusted_mode.vdisplay; - ret = drm_plane_helper_check_update(plane, crtc, state->fb, - src, dest, &clip, - state->rotation, - min_scale, - max_scale, - true, true, &visible); + ret = drm_plane_helper_check_state(state, &clip, + min_scale, max_scale, + true, true); if (ret) return ret; - if (!visible) + if (!state->visible) goto out_disable; vop_plane_state->format = vop_convert_format(fb->pixel_format); @@ -644,7 +694,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. 
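+	 * (Plane src coordinates are in 16.16 fixed point, hence the
+	 * right shift by 16 below.)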
*/ - if (is_yuv_support(fb->pixel_format) && ((src->x1 >> 16) % 2)) + if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2)) return -EINVAL; vop_plane_state->enable = true; @@ -694,8 +744,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane, unsigned int actual_w, actual_h; unsigned int dsp_stx, dsp_sty; uint32_t act_info, dsp_info, dsp_st; - struct drm_rect *src = &vop_plane_state->src; - struct drm_rect *dest = &vop_plane_state->dest; + struct drm_rect *src = &state->src; + struct drm_rect *dest = &state->dst; struct drm_gem_object *obj, *uv_obj; struct rockchip_gem_object *rk_obj, *rk_uv_obj; unsigned long offset; @@ -869,6 +919,8 @@ static int vop_crtc_enable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&vop->irq_lock, flags); + rockchip_drm_psr_disable(&vop->crtc); + return 0; } @@ -885,6 +937,8 @@ static void vop_crtc_disable_vblank(struct drm_crtc *crtc) VOP_INTR_SET_TYPE(vop, enable, FS_INTR, 0); spin_unlock_irqrestore(&vop->irq_lock, flags); + + rockchip_drm_psr_enable(&vop->crtc); } static void vop_crtc_wait_for_update(struct drm_crtc *crtc) @@ -928,11 +982,17 @@ static void vop_crtc_enable(struct drm_crtc *crtc) u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start; u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start; u16 vact_end = vact_st + vdisplay; - uint32_t val; + uint32_t pin_pol, val; + int ret; WARN_ON(vop->event); - vop_enable(crtc); + ret = vop_enable(crtc); + if (ret) { + DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret); + return; + } + /* * If dclk rate is zero, mean that scanout is stop, * we don't need wait any more. @@ -969,25 +1029,31 @@ static void vop_crtc_enable(struct drm_crtc *crtc) vop_dsp_hold_valid_irq_disable(vop); } - val = 0x8; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; - val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1); - VOP_CTRL_SET(vop, pin_pol, val); + pin_pol = 0x8; + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 
0 : (1 << 1); + VOP_CTRL_SET(vop, pin_pol, pin_pol); + switch (s->output_type) { case DRM_MODE_CONNECTOR_LVDS: VOP_CTRL_SET(vop, rgb_en, 1); + VOP_CTRL_SET(vop, rgb_pin_pol, pin_pol); break; case DRM_MODE_CONNECTOR_eDP: + VOP_CTRL_SET(vop, edp_pin_pol, pin_pol); VOP_CTRL_SET(vop, edp_en, 1); break; case DRM_MODE_CONNECTOR_HDMIA: + VOP_CTRL_SET(vop, hdmi_pin_pol, pin_pol); VOP_CTRL_SET(vop, hdmi_en, 1); break; case DRM_MODE_CONNECTOR_DSI: + VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol); VOP_CTRL_SET(vop, mipi_en, 1); break; default: - DRM_ERROR("unsupport connector_type[%d]\n", s->output_type); + DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", + s->output_type); } VOP_CTRL_SET(vop, out_mode, s->output_mode); @@ -1029,10 +1095,11 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, struct vop *vop = to_vop(crtc); spin_lock_irq(&crtc->dev->event_lock); - if (crtc->state->event) { - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - WARN_ON(vop->event); + vop->vblank_active = true; + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + WARN_ON(vop->event); + if (crtc->state->event) { vop->event = crtc->state->event; crtc->state->event = NULL; } @@ -1119,12 +1186,14 @@ static void vop_handle_vblank(struct vop *vop) spin_lock_irqsave(&drm->event_lock, flags); if (vop->event) { - drm_crtc_send_vblank_event(crtc, vop->event); - drm_crtc_vblank_put(crtc); vop->event = NULL; } + if (vop->vblank_active) { + vop->vblank_active = false; + drm_crtc_vblank_put(crtc); + } spin_unlock_irqrestore(&drm->event_lock, flags); if (!completion_done(&vop->wait_update_complete)) @@ -1162,6 +1231,12 @@ static irqreturn_t vop_isr(int irq, void *data) ret = IRQ_HANDLED; } + if (active_irqs & LINE_FLAG_INTR) { + complete(&vop->line_flag_completion); + active_irqs &= ~LINE_FLAG_INTR; + ret = IRQ_HANDLED; + } + if (active_irqs & FS_INTR) { drm_crtc_handle_vblank(crtc); vop_handle_vblank(vop); @@ -1171,7 +1246,8 @@ static irqreturn_t vop_isr(int irq, void *data) /* Unhandled irqs are spurious. 
 	 */
 	if (active_irqs)
-		DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+			      active_irqs);
 
 	return ret;
 }
@@ -1206,7 +1282,8 @@ static int vop_create_crtc(struct vop *vop)
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
+				      ret);
 			goto err_cleanup_planes;
 		}
 
@@ -1244,7 +1321,8 @@ static int vop_create_crtc(struct vop *vop)
 					       win_data->phy->nformats,
 					       win_data->type, NULL);
 		if (ret) {
-			DRM_ERROR("failed to initialize overlay plane\n");
+			DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
+				      ret);
 			goto err_cleanup_crtc;
 		}
 		drm_plane_helper_add(&vop_win->base, &plane_helper_funcs);
@@ -1252,14 +1330,15 @@
 	port = of_get_child_by_name(dev->of_node, "port");
 	if (!port) {
-		DRM_ERROR("no port node found in %s\n",
-			  dev->of_node->full_name);
+		DRM_DEV_ERROR(vop->dev, "no port node found in %s\n",
+			      dev->of_node->full_name);
 		ret = -ENOENT;
 		goto err_cleanup_crtc;
 	}
 
 	init_completion(&vop->dsp_hold_completion);
 	init_completion(&vop->wait_update_complete);
+	init_completion(&vop->line_flag_completion);
 
 	crtc->port = port;
 	rockchip_register_crtc_funcs(crtc, &private_crtc_funcs);
@@ -1387,6 +1466,7 @@ static int vop_initial(struct vop *vop)
 	clk_disable(vop->aclk);
 
 	vop->is_enabled = false;
+	vop->vblank_active = false;
 
 	return 0;
 
@@ -1416,6 +1496,49 @@ static void vop_win_init(struct vop *vop)
 	}
 }
 
+/**
+ * rockchip_drm_wait_line_flag - wait for the given line flag event
+ * @crtc: CRTC to enable the line flag interrupt on
+ * @line_num: line number to wait for
+ * @mstimeout: timeout in milliseconds
+ *
+ * The caller blocks here until the requested line flag interrupt has
+ * fired or the timeout expires.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */ +int rockchip_drm_wait_line_flag(struct drm_crtc *crtc, unsigned int line_num, + unsigned int mstimeout) +{ + struct vop *vop = to_vop(crtc); + unsigned long jiffies_left; + + if (!crtc || !vop->is_enabled) + return -ENODEV; + + if (line_num > crtc->mode.vtotal || mstimeout <= 0) + return -EINVAL; + + if (vop_line_flag_irq_is_enabled(vop)) + return -EBUSY; + + reinit_completion(&vop->line_flag_completion); + vop_line_flag_irq_enable(vop, line_num); + + jiffies_left = wait_for_completion_timeout(&vop->line_flag_completion, + msecs_to_jiffies(mstimeout)); + vop_line_flag_irq_disable(vop); + + if (jiffies_left == 0) { + dev_err(vop->dev, "Timeout waiting for IRQ\n"); + return -ETIMEDOUT; + } + + return 0; +} +EXPORT_SYMBOL(rockchip_drm_wait_line_flag); + static int vop_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -1484,6 +1607,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) return ret; pm_runtime_enable(&pdev->dev); + return 0; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 071ff0be7a95..1dbc52615257 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -33,6 +33,7 @@ struct vop_reg { uint32_t offset; uint32_t shift; uint32_t mask; + bool write_mask; }; struct vop_ctrl { @@ -48,6 +49,10 @@ struct vop_ctrl { struct vop_reg dither_down; struct vop_reg dither_up; struct vop_reg pin_pol; + struct vop_reg rgb_pin_pol; + struct vop_reg hdmi_pin_pol; + struct vop_reg edp_pin_pol; + struct vop_reg mipi_pin_pol; struct vop_reg htotal_pw; struct vop_reg hact_st_end; @@ -56,6 +61,8 @@ struct vop_ctrl { struct vop_reg hpost_st_end; struct vop_reg vpost_st_end; + struct vop_reg line_flag_num[2]; + struct vop_reg cfg_done; }; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 919992cdc97e..35c51f3402f2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -23,7 +23,14 @@ #define VOP_REG(off, _mask, s) \ {.offset = off, \ .mask = _mask, \ - .shift = s,} + .shift = s, \ + .write_mask = false,} + +#define VOP_REG_MASK(off, _mask, s) \ + {.offset = off, \ + .mask = _mask, \ + .shift = s, \ + .write_mask = true,} static const uint32_t formats_win_full[] = { DRM_FORMAT_XRGB8888, @@ -50,6 +57,89 @@ static const uint32_t formats_win_lite[] = { DRM_FORMAT_BGR565, }; +static const struct vop_scl_regs rk3036_win_scl = { + .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), + .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), + .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), + .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16), +}; + +static const struct vop_win_phy rk3036_win0_data = { + .scl = &rk3036_win_scl, + .data_formats = formats_win_full, + .nformats = ARRAY_SIZE(formats_win_full), + .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), + .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), + .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), + .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), + .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), + .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16), +}; 
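+
+/*
+ * Each VOP_REG() entry above describes one register field by offset,
+ * mask and shift; e.g. .rb_swap lives in bit 15 of RK3036_SYS_CTRL.
+ * The VOP_WIN_SET()/VOP_CTRL_SET() helpers apply the mask and shift
+ * before writing the register.
+ */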
+ +static const struct vop_win_phy rk3036_win1_data = { + .data_formats = formats_win_lite, + .nformats = ARRAY_SIZE(formats_win_lite), + .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), + .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), + .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), + .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0), + .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0), + .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0), + .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0), + .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0), +}; + +static const struct vop_win_data rk3036_vop_win_data[] = { + { .base = 0x00, .phy = &rk3036_win0_data, + .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x00, .phy = &rk3036_win1_data, + .type = DRM_PLANE_TYPE_CURSOR }, +}; + +static const int rk3036_vop_intrs[] = { + DSP_HOLD_VALID_INTR, + FS_INTR, + LINE_FLAG_INTR, + BUS_ERROR_INTR, +}; + +static const struct vop_intr rk3036_intr = { + .intrs = rk3036_vop_intrs, + .nintrs = ARRAY_SIZE(rk3036_vop_intrs), + .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0), + .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4), + .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8), +}; + +static const struct vop_ctrl rk3036_ctrl_data = { + .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30), + .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0), + .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4), + .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), + .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0), + .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), + .vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3036_INT_STATUS, 0xfff, 12), + .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0), +}; + +static const struct vop_reg_data rk3036_vop_init_reg_table[] = { + {RK3036_DSP_CTRL1, 0x00000000}, +}; + +static const struct vop_data rk3036_vop = { + .init_table = rk3036_vop_init_reg_table, + .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table), + .ctrl = &rk3036_ctrl_data, + .intr = &rk3036_intr, + .win = rk3036_vop_win_data, + .win_size = ARRAY_SIZE(rk3036_vop_win_data), +}; + static const struct vop_scl_extension rk3288_win_full_scl_ext = { .cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31), .cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30), @@ -133,6 +223,7 @@ static const struct vop_ctrl rk3288_ctrl_data = { .vact_st_end = VOP_REG(RK3288_DSP_VACT_ST_END, 0x1fff1fff, 0), .hpost_st_end = VOP_REG(RK3288_POST_DSP_HACT_INFO, 0x1fff1fff, 0), .vpost_st_end = VOP_REG(RK3288_POST_DSP_VACT_INFO, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3288_INTR_CTRL0, 0x1fff, 12), .cfg_done = VOP_REG(RK3288_REG_CFG_DONE, 0x1, 0), }; @@ -190,93 +281,104 @@ static const struct vop_data rk3288_vop = { .win_size = ARRAY_SIZE(rk3288_vop_win_data), }; -static const struct vop_scl_regs rk3036_win_scl = { - .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), - .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), - .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), - .scale_cbcr_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 16), -}; - -static const struct vop_win_phy rk3036_win0_data = { - .scl = &rk3036_win_scl, - .data_formats = formats_win_full, - .nformats = ARRAY_SIZE(formats_win_full), - .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), - .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3), - .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15), - .act_info = VOP_REG(RK3036_WIN0_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3036_WIN0_DSP_INFO, 0x0fff0fff, 0), 
- .dsp_st = VOP_REG(RK3036_WIN0_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), - .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), - .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16), +static const struct vop_ctrl rk3399_ctrl_data = { + .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22), + .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), + .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), + .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), + .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), + .mipi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 15), + .dither_down = VOP_REG(RK3399_DSP_CTRL1, 0xf, 1), + .dither_up = VOP_REG(RK3399_DSP_CTRL1, 0x1, 6), + .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), + .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), + .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), + .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20), + .edp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 24), + .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28), + .htotal_pw = VOP_REG(RK3399_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), + .hact_st_end = VOP_REG(RK3399_DSP_HACT_ST_END, 0x1fff1fff, 0), + .vtotal_pw = VOP_REG(RK3399_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), + .vact_st_end = VOP_REG(RK3399_DSP_VACT_ST_END, 0x1fff1fff, 0), + .hpost_st_end = VOP_REG(RK3399_POST_DSP_HACT_INFO, 0x1fff1fff, 0), + .vpost_st_end = VOP_REG(RK3399_POST_DSP_VACT_INFO, 0x1fff1fff, 0), + .line_flag_num[0] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 0), + .line_flag_num[1] = VOP_REG(RK3399_LINE_FLAG, 0xffff, 16), + .cfg_done = VOP_REG_MASK(RK3399_REG_CFG_DONE, 0x1, 0), }; -static const struct vop_win_phy rk3036_win1_data = { - .data_formats = formats_win_lite, - .nformats = ARRAY_SIZE(formats_win_lite), - .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1), - .format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6), - .rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19), - .act_info = VOP_REG(RK3036_WIN1_ACT_INFO, 0x1fff1fff, 0), - .dsp_info = VOP_REG(RK3036_WIN1_DSP_INFO, 0x0fff0fff, 0), - .dsp_st = VOP_REG(RK3036_WIN1_DSP_ST, 0x1fff1fff, 0), - .yrgb_mst = VOP_REG(RK3036_WIN1_MST, 0xffffffff, 0), - .yrgb_vir = VOP_REG(RK3036_WIN1_VIR, 0xffff, 0), -}; - -static const struct vop_win_data rk3036_vop_win_data[] = { - { .base = 0x00, .phy = &rk3036_win0_data, - .type = DRM_PLANE_TYPE_PRIMARY }, - { .base = 0x00, .phy = &rk3036_win1_data, - .type = DRM_PLANE_TYPE_CURSOR }, -}; - -static const int rk3036_vop_intrs[] = { - DSP_HOLD_VALID_INTR, +static const int rk3399_vop_intrs[] = { FS_INTR, + 0, 0, LINE_FLAG_INTR, + 0, BUS_ERROR_INTR, + 0, 0, 0, 0, 0, 0, 0, + DSP_HOLD_VALID_INTR, }; -static const struct vop_intr rk3036_intr = { - .intrs = rk3036_vop_intrs, - .nintrs = ARRAY_SIZE(rk3036_vop_intrs), - .status = VOP_REG(RK3036_INT_STATUS, 0xf, 0), - .enable = VOP_REG(RK3036_INT_STATUS, 0xf, 4), - .clear = VOP_REG(RK3036_INT_STATUS, 0xf, 8), +static const struct vop_intr rk3399_vop_intr = { + .intrs = rk3399_vop_intrs, + .nintrs = ARRAY_SIZE(rk3399_vop_intrs), + .status = VOP_REG_MASK(RK3399_INTR_STATUS0, 0xffff, 0), + .enable = VOP_REG_MASK(RK3399_INTR_EN0, 0xffff, 0), + .clear = VOP_REG_MASK(RK3399_INTR_CLEAR0, 0xffff, 0), }; -static const struct vop_ctrl rk3036_ctrl_data = { - .standby = VOP_REG(RK3036_SYS_CTRL, 0x1, 30), - .out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0), - .pin_pol = VOP_REG(RK3036_DSP_CTRL0, 0xf, 4), - .htotal_pw = VOP_REG(RK3036_DSP_HTOTAL_HS_END, 0x1fff1fff, 0), - .hact_st_end = VOP_REG(RK3036_DSP_HACT_ST_END, 0x1fff1fff, 0), - .vtotal_pw = VOP_REG(RK3036_DSP_VTOTAL_VS_END, 0x1fff1fff, 0), - 
.vact_st_end = VOP_REG(RK3036_DSP_VACT_ST_END, 0x1fff1fff, 0), - .cfg_done = VOP_REG(RK3036_REG_CFG_DONE, 0x1, 0), +static const struct vop_reg_data rk3399_init_reg_table[] = { + {RK3399_SYS_CTRL, 0x2000f800}, + {RK3399_DSP_CTRL0, 0x00000000}, + {RK3399_WIN0_CTRL0, 0x00000080}, + {RK3399_WIN1_CTRL0, 0x00000080}, + /* TODO: Win2/3 support multiple area function, but we haven't found + * a suitable way to use it yet, so let's just use them as other windows + * with only area 0 enabled. + */ + {RK3399_WIN2_CTRL0, 0x00000010}, + {RK3399_WIN3_CTRL0, 0x00000010}, }; -static const struct vop_reg_data rk3036_vop_init_reg_table[] = { - {RK3036_DSP_CTRL1, 0x00000000}, +static const struct vop_data rk3399_vop_big = { + .init_table = rk3399_init_reg_table, + .table_size = ARRAY_SIZE(rk3399_init_reg_table), + .intr = &rk3399_vop_intr, + .ctrl = &rk3399_ctrl_data, + /* + * rk3399 vop big windows register layout is same as rk3288. + */ + .win = rk3288_vop_win_data, + .win_size = ARRAY_SIZE(rk3288_vop_win_data), }; -static const struct vop_data rk3036_vop = { - .init_table = rk3036_vop_init_reg_table, - .table_size = ARRAY_SIZE(rk3036_vop_init_reg_table), - .ctrl = &rk3036_ctrl_data, - .intr = &rk3036_intr, - .win = rk3036_vop_win_data, - .win_size = ARRAY_SIZE(rk3036_vop_win_data), +static const struct vop_win_data rk3399_vop_lit_win_data[] = { + { .base = 0x00, .phy = &rk3288_win01_data, + .type = DRM_PLANE_TYPE_PRIMARY }, + { .base = 0x00, .phy = &rk3288_win23_data, + .type = DRM_PLANE_TYPE_CURSOR}, +}; + +static const struct vop_data rk3399_vop_lit = { + .init_table = rk3399_init_reg_table, + .table_size = ARRAY_SIZE(rk3399_init_reg_table), + .intr = &rk3399_vop_intr, + .ctrl = &rk3399_ctrl_data, + /* + * rk3399 vop lit windows register layout is same as rk3288, + * but cut off the win1 and win3 windows. 
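+	 * Only a primary and a cursor plane are registered for the lit
+	 * VOP as a result.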
+ */ + .win = rk3399_vop_lit_win_data, + .win_size = ARRAY_SIZE(rk3399_vop_lit_win_data), }; static const struct of_device_id vop_driver_dt_match[] = { - { .compatible = "rockchip,rk3288-vop", - .data = &rk3288_vop }, { .compatible = "rockchip,rk3036-vop", .data = &rk3036_vop }, + { .compatible = "rockchip,rk3288-vop", + .data = &rk3288_vop }, + { .compatible = "rockchip,rk3399-vop-big", + .data = &rk3399_vop_big }, + { .compatible = "rockchip,rk3399-vop-lit", + .data = &rk3399_vop_lit }, {}, }; MODULE_DEVICE_TABLE(of, vop_driver_dt_match); @@ -305,7 +407,6 @@ static struct platform_driver vop_platform_driver = { .remove = vop_remove, .driver = { .name = "rockchip-vop", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(vop_driver_dt_match), }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h index d4b46cba2f26..cd197260ece5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h @@ -166,4 +166,197 @@ #define RK3036_HWC_LUT_ADDR 0x800 /* rk3036 register definition end */ +/* rk3399 register definition */ +#define RK3399_REG_CFG_DONE 0x00000 +#define RK3399_VERSION_INFO 0x00004 +#define RK3399_SYS_CTRL 0x00008 +#define RK3399_SYS_CTRL1 0x0000c +#define RK3399_DSP_CTRL0 0x00010 +#define RK3399_DSP_CTRL1 0x00014 +#define RK3399_DSP_BG 0x00018 +#define RK3399_MCU_CTRL 0x0001c +#define RK3399_WB_CTRL0 0x00020 +#define RK3399_WB_CTRL1 0x00024 +#define RK3399_WB_YRGB_MST 0x00028 +#define RK3399_WB_CBR_MST 0x0002c +#define RK3399_WIN0_CTRL0 0x00030 +#define RK3399_WIN0_CTRL1 0x00034 +#define RK3399_WIN0_COLOR_KEY 0x00038 +#define RK3399_WIN0_VIR 0x0003c +#define RK3399_WIN0_YRGB_MST 0x00040 +#define RK3399_WIN0_CBR_MST 0x00044 +#define RK3399_WIN0_ACT_INFO 0x00048 +#define RK3399_WIN0_DSP_INFO 0x0004c +#define RK3399_WIN0_DSP_ST 0x00050 +#define RK3399_WIN0_SCL_FACTOR_YRGB 0x00054 +#define RK3399_WIN0_SCL_FACTOR_CBR 0x00058 +#define RK3399_WIN0_SCL_OFFSET 0x0005c +#define RK3399_WIN0_SRC_ALPHA_CTRL 0x00060 +#define RK3399_WIN0_DST_ALPHA_CTRL 0x00064 +#define RK3399_WIN0_FADING_CTRL 0x00068 +#define RK3399_WIN0_CTRL2 0x0006c +#define RK3399_WIN1_CTRL0 0x00070 +#define RK3399_WIN1_CTRL1 0x00074 +#define RK3399_WIN1_COLOR_KEY 0x00078 +#define RK3399_WIN1_VIR 0x0007c +#define RK3399_WIN1_YRGB_MST 0x00080 +#define RK3399_WIN1_CBR_MST 0x00084 +#define RK3399_WIN1_ACT_INFO 0x00088 +#define RK3399_WIN1_DSP_INFO 0x0008c +#define RK3399_WIN1_DSP_ST 0x00090 +#define RK3399_WIN1_SCL_FACTOR_YRGB 0x00094 +#define RK3399_WIN1_SCL_FACTOR_CBR 0x00098 +#define RK3399_WIN1_SCL_OFFSET 0x0009c +#define RK3399_WIN1_SRC_ALPHA_CTRL 0x000a0 +#define RK3399_WIN1_DST_ALPHA_CTRL 0x000a4 +#define RK3399_WIN1_FADING_CTRL 0x000a8 +#define RK3399_WIN1_CTRL2 0x000ac +#define RK3399_WIN2_CTRL0 0x000b0 +#define RK3399_WIN2_CTRL1 0x000b4 +#define RK3399_WIN2_VIR0_1 0x000b8 +#define RK3399_WIN2_VIR2_3 0x000bc +#define RK3399_WIN2_MST0 0x000c0 +#define RK3399_WIN2_DSP_INFO0 0x000c4 +#define RK3399_WIN2_DSP_ST0 0x000c8 +#define RK3399_WIN2_COLOR_KEY 0x000cc +#define RK3399_WIN2_MST1 0x000d0 +#define RK3399_WIN2_DSP_INFO1 0x000d4 +#define RK3399_WIN2_DSP_ST1 0x000d8 +#define RK3399_WIN2_SRC_ALPHA_CTRL 0x000dc +#define RK3399_WIN2_MST2 0x000e0 +#define RK3399_WIN2_DSP_INFO2 0x000e4 +#define RK3399_WIN2_DSP_ST2 0x000e8 +#define RK3399_WIN2_DST_ALPHA_CTRL 0x000ec +#define RK3399_WIN2_MST3 0x000f0 +#define RK3399_WIN2_DSP_INFO3 0x000f4 +#define RK3399_WIN2_DSP_ST3 0x000f8 +#define RK3399_WIN2_FADING_CTRL 0x000fc +#define 
RK3399_WIN3_CTRL0 0x00100 +#define RK3399_WIN3_CTRL1 0x00104 +#define RK3399_WIN3_VIR0_1 0x00108 +#define RK3399_WIN3_VIR2_3 0x0010c +#define RK3399_WIN3_MST0 0x00110 +#define RK3399_WIN3_DSP_INFO0 0x00114 +#define RK3399_WIN3_DSP_ST0 0x00118 +#define RK3399_WIN3_COLOR_KEY 0x0011c +#define RK3399_WIN3_MST1 0x00120 +#define RK3399_WIN3_DSP_INFO1 0x00124 +#define RK3399_WIN3_DSP_ST1 0x00128 +#define RK3399_WIN3_SRC_ALPHA_CTRL 0x0012c +#define RK3399_WIN3_MST2 0x00130 +#define RK3399_WIN3_DSP_INFO2 0x00134 +#define RK3399_WIN3_DSP_ST2 0x00138 +#define RK3399_WIN3_DST_ALPHA_CTRL 0x0013c +#define RK3399_WIN3_MST3 0x00140 +#define RK3399_WIN3_DSP_INFO3 0x00144 +#define RK3399_WIN3_DSP_ST3 0x00148 +#define RK3399_WIN3_FADING_CTRL 0x0014c +#define RK3399_HWC_CTRL0 0x00150 +#define RK3399_HWC_CTRL1 0x00154 +#define RK3399_HWC_MST 0x00158 +#define RK3399_HWC_DSP_ST 0x0015c +#define RK3399_HWC_SRC_ALPHA_CTRL 0x00160 +#define RK3399_HWC_DST_ALPHA_CTRL 0x00164 +#define RK3399_HWC_FADING_CTRL 0x00168 +#define RK3399_HWC_RESERVED1 0x0016c +#define RK3399_POST_DSP_HACT_INFO 0x00170 +#define RK3399_POST_DSP_VACT_INFO 0x00174 +#define RK3399_POST_SCL_FACTOR_YRGB 0x00178 +#define RK3399_POST_RESERVED 0x0017c +#define RK3399_POST_SCL_CTRL 0x00180 +#define RK3399_POST_DSP_VACT_INFO_F1 0x00184 +#define RK3399_DSP_HTOTAL_HS_END 0x00188 +#define RK3399_DSP_HACT_ST_END 0x0018c +#define RK3399_DSP_VTOTAL_VS_END 0x00190 +#define RK3399_DSP_VACT_ST_END 0x00194 +#define RK3399_DSP_VS_ST_END_F1 0x00198 +#define RK3399_DSP_VACT_ST_END_F1 0x0019c +#define RK3399_PWM_CTRL 0x001a0 +#define RK3399_PWM_PERIOD_HPR 0x001a4 +#define RK3399_PWM_DUTY_LPR 0x001a8 +#define RK3399_PWM_CNT 0x001ac +#define RK3399_BCSH_COLOR_BAR 0x001b0 +#define RK3399_BCSH_BCS 0x001b4 +#define RK3399_BCSH_H 0x001b8 +#define RK3399_BCSH_CTRL 0x001bc +#define RK3399_CABC_CTRL0 0x001c0 +#define RK3399_CABC_CTRL1 0x001c4 +#define RK3399_CABC_CTRL2 0x001c8 +#define RK3399_CABC_CTRL3 0x001cc +#define RK3399_CABC_GAUSS_LINE0_0 0x001d0 +#define RK3399_CABC_GAUSS_LINE0_1 0x001d4 +#define RK3399_CABC_GAUSS_LINE1_0 0x001d8 +#define RK3399_CABC_GAUSS_LINE1_1 0x001dc +#define RK3399_CABC_GAUSS_LINE2_0 0x001e0 +#define RK3399_CABC_GAUSS_LINE2_1 0x001e4 +#define RK3399_FRC_LOWER01_0 0x001e8 +#define RK3399_FRC_LOWER01_1 0x001ec +#define RK3399_FRC_LOWER10_0 0x001f0 +#define RK3399_FRC_LOWER10_1 0x001f4 +#define RK3399_FRC_LOWER11_0 0x001f8 +#define RK3399_FRC_LOWER11_1 0x001fc +#define RK3399_AFBCD0_CTRL 0x00200 +#define RK3399_AFBCD0_HDR_PTR 0x00204 +#define RK3399_AFBCD0_PIC_SIZE 0x00208 +#define RK3399_AFBCD0_STATUS 0x0020c +#define RK3399_AFBCD1_CTRL 0x00220 +#define RK3399_AFBCD1_HDR_PTR 0x00224 +#define RK3399_AFBCD1_PIC_SIZE 0x00228 +#define RK3399_AFBCD1_STATUS 0x0022c +#define RK3399_AFBCD2_CTRL 0x00240 +#define RK3399_AFBCD2_HDR_PTR 0x00244 +#define RK3399_AFBCD2_PIC_SIZE 0x00248 +#define RK3399_AFBCD2_STATUS 0x0024c +#define RK3399_AFBCD3_CTRL 0x00260 +#define RK3399_AFBCD3_HDR_PTR 0x00264 +#define RK3399_AFBCD3_PIC_SIZE 0x00268 +#define RK3399_AFBCD3_STATUS 0x0026c +#define RK3399_INTR_EN0 0x00280 +#define RK3399_INTR_CLEAR0 0x00284 +#define RK3399_INTR_STATUS0 0x00288 +#define RK3399_INTR_RAW_STATUS0 0x0028c +#define RK3399_INTR_EN1 0x00290 +#define RK3399_INTR_CLEAR1 0x00294 +#define RK3399_INTR_STATUS1 0x00298 +#define RK3399_INTR_RAW_STATUS1 0x0029c +#define RK3399_LINE_FLAG 0x002a0 +#define RK3399_VOP_STATUS 0x002a4 +#define RK3399_BLANKING_VALUE 0x002a8 +#define RK3399_MCU_BYPASS_PORT 0x002ac +#define RK3399_WIN0_DSP_BG 0x002b0 +#define 
RK3399_WIN1_DSP_BG 0x002b4 +#define RK3399_WIN2_DSP_BG 0x002b8 +#define RK3399_WIN3_DSP_BG 0x002bc +#define RK3399_YUV2YUV_WIN 0x002c0 +#define RK3399_YUV2YUV_POST 0x002c4 +#define RK3399_AUTO_GATING_EN 0x002cc +#define RK3399_WIN0_CSC_COE 0x003a0 +#define RK3399_WIN1_CSC_COE 0x003c0 +#define RK3399_WIN2_CSC_COE 0x003e0 +#define RK3399_WIN3_CSC_COE 0x00400 +#define RK3399_HWC_CSC_COE 0x00420 +#define RK3399_BCSH_R2Y_CSC_COE 0x00440 +#define RK3399_BCSH_Y2R_CSC_COE 0x00460 +#define RK3399_POST_YUV2YUV_Y2R_COE 0x00480 +#define RK3399_POST_YUV2YUV_3X3_COE 0x004a0 +#define RK3399_POST_YUV2YUV_R2Y_COE 0x004c0 +#define RK3399_WIN0_YUV2YUV_Y2R 0x004e0 +#define RK3399_WIN0_YUV2YUV_3X3 0x00500 +#define RK3399_WIN0_YUV2YUV_R2Y 0x00520 +#define RK3399_WIN1_YUV2YUV_Y2R 0x00540 +#define RK3399_WIN1_YUV2YUV_3X3 0x00560 +#define RK3399_WIN1_YUV2YUV_R2Y 0x00580 +#define RK3399_WIN2_YUV2YUV_Y2R 0x005a0 +#define RK3399_WIN2_YUV2YUV_3X3 0x005c0 +#define RK3399_WIN2_YUV2YUV_R2Y 0x005e0 +#define RK3399_WIN3_YUV2YUV_Y2R 0x00600 +#define RK3399_WIN3_YUV2YUV_3X3 0x00620 +#define RK3399_WIN3_YUV2YUV_R2Y 0x00640 +#define RK3399_WIN2_LUT_ADDR 0x01000 +#define RK3399_WIN3_LUT_ADDR 0x01400 +#define RK3399_HWC_LUT_ADDR 0x01800 +#define RK3399_CABC_GAMMA_LUT_ADDR 0x01c00 +#define RK3399_GAMMA_LUT_ADDR 0x02000 +/* rk3399 register definition end */ + #endif /* _ROCKCHIP_VOP_REG_H */ diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 21aed1febeb4..3b807135a5cd 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -50,7 +50,7 @@ static const struct file_operations savage_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, + DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA | DRIVER_LEGACY, .dev_priv_size = sizeof(drm_savage_buf_priv_t), .load = savage_driver_load, .firstopen = savage_driver_firstopen, diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index c01ad0aeaa58..3dc0d8ff95ec 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -1001,15 +1001,9 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ cmdbuf->cmd_addr = kcmd_addr; } if (cmdbuf->vb_size) { - kvb_addr = kmalloc(cmdbuf->vb_size, GFP_KERNEL); - if (kvb_addr == NULL) { - ret = -ENOMEM; - goto done; - } - - if (copy_from_user(kvb_addr, cmdbuf->vb_addr, - cmdbuf->vb_size)) { - ret = -EFAULT; + kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size); + if (IS_ERR(kvb_addr)) { + ret = PTR_ERR(kvb_addr); goto done; } cmdbuf->vb_addr = kvb_addr; diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 79bce76cb8f7..ae9839886c4d 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -102,7 +102,7 @@ static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file) } static struct drm_driver driver = { - .driver_features = DRIVER_USE_AGP, + .driver_features = DRIVER_USE_AGP | DRIVER_LEGACY, .load = sis_driver_load, .unload = sis_driver_unload, .open = sis_driver_open, diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 96bd3d08b2d4..7cd3804c6dee 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -178,7 +178,7 @@ static void sti_atomic_complete(struct sti_private *private, */ drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_planes(drm, state, 
false); + drm_atomic_helper_commit_planes(drm, state, 0); drm_atomic_helper_commit_modeset_enables(drm, state); drm_atomic_helper_wait_for_vblanks(drm, state); @@ -282,7 +282,7 @@ static const struct file_operations sti_driver_fops = { }; static struct drm_driver sti_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index c4d03c1b6db8..9059e3ef9786 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_fb_helper.h> #include "sun4i_crtc.h" #include "sun4i_drv.h" @@ -109,7 +110,7 @@ static void sun4i_remove_framebuffers(void) ap->ranges[0].base = 0; ap->ranges[0].size = ~0; - remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); + drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); kfree(ap); } diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index fab5ebcb0fef..f418892b0c71 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -56,6 +56,7 @@ static const struct file_operations tdfx_driver_fops = { }; static struct drm_driver driver = { + .driver_features = DRIVER_LEGACY, .set_busid = drm_pci_set_busid, .fops = &tdfx_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 8495bd01b544..3de7ce33d3d4 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -480,17 +480,6 @@ static const struct drm_plane_funcs tegra_primary_plane_funcs = { .atomic_destroy_state = tegra_plane_atomic_destroy_state, }; -static int tegra_plane_prepare_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) -{ - return 0; -} - -static void tegra_plane_cleanup_fb(struct drm_plane *plane, - const struct drm_plane_state *old_fb) -{ -} - static int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state) { @@ -624,8 +613,6 @@ static void tegra_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_primary_plane_helper_funcs = { - .prepare_fb = tegra_plane_prepare_fb, - .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_plane_atomic_check, .atomic_update = tegra_plane_atomic_update, .atomic_disable = tegra_plane_atomic_disable, @@ -796,8 +783,6 @@ static const struct drm_plane_funcs tegra_cursor_plane_funcs = { }; static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { - .prepare_fb = tegra_plane_prepare_fb, - .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_cursor_atomic_check, .atomic_update = tegra_cursor_atomic_update, .atomic_disable = tegra_cursor_atomic_disable, @@ -866,8 +851,6 @@ static const uint32_t tegra_overlay_plane_formats[] = { }; static const struct drm_plane_helper_funcs tegra_overlay_plane_helper_funcs = { - .prepare_fb = tegra_plane_prepare_fb, - .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_plane_atomic_check, .atomic_update = tegra_plane_atomic_update, .atomic_disable = tegra_plane_atomic_disable, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 755264d9db22..4b9f1c79cd7b 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c 
@@ -57,7 +57,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra, drm_atomic_helper_commit_modeset_disables(drm, state); drm_atomic_helper_commit_modeset_enables(drm, state); - drm_atomic_helper_commit_planes(drm, state, true); + drm_atomic_helper_commit_planes(drm, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_wait_for_vblanks(drm, state); diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index deeca4869d94..6f675175a9e5 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \ tilcdc_slave_compat.dtb.o tilcdc-y := \ + tilcdc_plane.o \ tilcdc_crtc.o \ tilcdc_tfp410.o \ tilcdc_panel.o \ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 107c8bd04f6d..208768922030 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -17,6 +17,7 @@ #include "drm_flip_work.h" #include <drm/drm_plane_helper.h> +#include <drm/drm_atomic_helper.h> #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -26,9 +27,10 @@ struct tilcdc_crtc { struct drm_crtc base; + struct drm_plane primary; const struct tilcdc_panel_info *info; struct drm_pending_vblank_event *event; - int dpms; + bool enabled; wait_queue_head_t frame_done_wq; bool frame_done; spinlock_t irq_lock; @@ -67,6 +69,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) struct drm_gem_cma_object *gem; unsigned int depth, bpp; dma_addr_t start, end; + u64 dma_base_and_ceiling; drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); gem = drm_fb_cma_get_gem_obj(fb, 0); @@ -77,8 +80,13 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) end = start + (crtc->mode.vdisplay * fb->pitches[0]); - tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, start); - tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG, end); + /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG + * with a single insruction, if available. This should make it more + * unlikely that LCDC would fetch the DMA addresses in the middle of + * an update. 
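+	 * The low 32 bits of the value hold the base address ("start")
+	 * and the high 32 bits hold the ceiling ("end - 1"), matching
+	 * the register pair programmed by tilcdc_write64().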
+ */ + dma_base_and_ceiling = (u64)(end - 1) << 32 | start; + tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling); if (tilcdc_crtc->curr_fb) drm_flip_work_queue(&tilcdc_crtc->unref_work, @@ -87,6 +95,43 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) tilcdc_crtc->curr_fb = fb; } +static void tilcdc_crtc_enable_irqs(struct drm_device *dev) +{ + struct tilcdc_drm_private *priv = dev->dev_private; + + tilcdc_clear_irqstatus(dev, 0xffffffff); + + if (priv->rev == 1) { + tilcdc_set(dev, LCDC_RASTER_CTRL_REG, + LCDC_V1_UNDERFLOW_INT_ENA); + tilcdc_set(dev, LCDC_DMA_CTRL_REG, + LCDC_V1_END_OF_FRAME_INT_ENA); + } else { + tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, + LCDC_V2_UNDERFLOW_INT_ENA | + LCDC_V2_END_OF_FRAME0_INT_ENA | + LCDC_FRAME_DONE | LCDC_SYNC_LOST); + } +} + +static void tilcdc_crtc_disable_irqs(struct drm_device *dev) +{ + struct tilcdc_drm_private *priv = dev->dev_private; + + /* disable irqs that we might have enabled: */ + if (priv->rev == 1) { + tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, + LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA); + tilcdc_clear(dev, LCDC_DMA_CTRL_REG, + LCDC_V1_END_OF_FRAME_INT_ENA); + } else { + tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, + LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA | + LCDC_V2_END_OF_FRAME0_INT_ENA | + LCDC_FRAME_DONE | LCDC_SYNC_LOST); + } +} + static void reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -100,66 +145,101 @@ static void reset(struct drm_crtc *crtc) tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET); } -static void start(struct drm_crtc *crtc) +static void tilcdc_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); + + if (tilcdc_crtc->enabled) + return; + + pm_runtime_get_sync(dev->dev); reset(crtc); + tilcdc_crtc_enable_irqs(dev); + tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE); tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY)); tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); + + drm_crtc_vblank_on(crtc); + + tilcdc_crtc->enabled = true; } -static void stop(struct drm_crtc *crtc) +void tilcdc_crtc_disable(struct drm_crtc *crtc) { + struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; + struct tilcdc_drm_private *priv = dev->dev_private; + + if (!tilcdc_crtc->enabled) + return; + tilcdc_crtc->frame_done = false; tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); + + /* + * if necessary wait for framedone irq which will still come + * before putting things to sleep.. 
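+	 * (The frame-done interrupt is only enabled on rev 2 hardware,
+	 * which is why the wait below is limited to that revision.)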
+ */ + if (priv->rev == 2) { + int ret = wait_event_timeout(tilcdc_crtc->frame_done_wq, + tilcdc_crtc->frame_done, + msecs_to_jiffies(500)); + if (ret == 0) + dev_err(dev->dev, "%s: timeout waiting for framedone\n", + __func__); + } + + drm_crtc_vblank_off(crtc); + + tilcdc_crtc_disable_irqs(dev); + + pm_runtime_put_sync(dev->dev); + + if (tilcdc_crtc->next_fb) { + drm_flip_work_queue(&tilcdc_crtc->unref_work, + tilcdc_crtc->next_fb); + tilcdc_crtc->next_fb = NULL; + } + + if (tilcdc_crtc->curr_fb) { + drm_flip_work_queue(&tilcdc_crtc->unref_work, + tilcdc_crtc->curr_fb); + tilcdc_crtc->curr_fb = NULL; + } + + drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); + tilcdc_crtc->last_vblank = ktime_set(0, 0); + + tilcdc_crtc->enabled = false; +} + +static bool tilcdc_crtc_is_on(struct drm_crtc *crtc) +{ + return crtc->state && crtc->state->enable && crtc->state->active; } static void tilcdc_crtc_destroy(struct drm_crtc *crtc) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); - tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + tilcdc_crtc_disable(crtc); of_node_put(crtc->port); drm_crtc_cleanup(crtc); drm_flip_work_cleanup(&tilcdc_crtc->unref_work); } -static int tilcdc_verify_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb) -{ - struct drm_device *dev = crtc->dev; - unsigned int depth, bpp; - - drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); - - if (fb->pitches[0] != crtc->mode.hdisplay * bpp / 8) { - dev_err(dev->dev, - "Invalid pitch: fb and crtc widths must be the same"); - return -EINVAL; - } - - return 0; -} - -static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, +int tilcdc_crtc_update_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) + struct drm_pending_vblank_event *event) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; - int r; unsigned long flags; - s64 tdiff; - ktime_t next_vblank; - - r = tilcdc_verify_fb(crtc, fb); - if (r) - return r; if (tilcdc_crtc->event) { dev_err(dev->dev, "already pending page flip!\n"); @@ -170,82 +250,31 @@ static int tilcdc_crtc_page_flip(struct drm_crtc *crtc, crtc->primary->fb = fb; - pm_runtime_get_sync(dev->dev); - spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); - next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, - 1000000 / crtc->hwmode.vrefresh); + if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { + ktime_t next_vblank; + s64 tdiff; + + next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, + 1000000 / crtc->hwmode.vrefresh); - tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); + tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); - if (tdiff >= TILCDC_VBLANK_SAFETY_THRESHOLD_US) + if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) + tilcdc_crtc->next_fb = fb; + } + + if (tilcdc_crtc->next_fb != fb) set_scanout(crtc, fb); - else - tilcdc_crtc->next_fb = fb; tilcdc_crtc->event = event; spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); - pm_runtime_put_sync(dev->dev); - return 0; } -void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode) -{ - struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); - struct drm_device *dev = crtc->dev; - struct tilcdc_drm_private *priv = dev->dev_private; - - /* we really only care about on or off: */ - if (mode != DRM_MODE_DPMS_ON) - mode = DRM_MODE_DPMS_OFF; - - if (tilcdc_crtc->dpms == mode) - return; - - tilcdc_crtc->dpms = mode; - - if (mode == DRM_MODE_DPMS_ON) { - pm_runtime_get_sync(dev->dev); - start(crtc); - } else { - 
tilcdc_crtc->frame_done = false; - stop(crtc); - - /* - * if necessary wait for framedone irq which will still come - * before putting things to sleep.. - */ - if (priv->rev == 2) { - int ret = wait_event_timeout( - tilcdc_crtc->frame_done_wq, - tilcdc_crtc->frame_done, - msecs_to_jiffies(50)); - if (ret == 0) - dev_err(dev->dev, "timeout waiting for framedone\n"); - } - - pm_runtime_put_sync(dev->dev); - - if (tilcdc_crtc->next_fb) { - drm_flip_work_queue(&tilcdc_crtc->unref_work, - tilcdc_crtc->next_fb); - tilcdc_crtc->next_fb = NULL; - } - - if (tilcdc_crtc->curr_fb) { - drm_flip_work_queue(&tilcdc_crtc->unref_work, - tilcdc_crtc->curr_fb); - tilcdc_crtc->curr_fb = NULL; - } - - drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); - } -} - static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -275,41 +304,21 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, return true; } -static void tilcdc_crtc_prepare(struct drm_crtc *crtc) -{ - tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); -} - -static void tilcdc_crtc_commit(struct drm_crtc *crtc) -{ - tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON); -} - -static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, - int x, int y, - struct drm_framebuffer *old_fb) +static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; struct tilcdc_drm_private *priv = dev->dev_private; const struct tilcdc_panel_info *info = tilcdc_crtc->info; uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw; - int ret; - - ret = tilcdc_crtc_mode_valid(crtc, mode); - if (WARN_ON(ret)) - return ret; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_framebuffer *fb = crtc->primary->state->fb; if (WARN_ON(!info)) - return -EINVAL; - - ret = tilcdc_verify_fb(crtc, crtc->primary->fb); - if (ret) - return ret; + return; - pm_runtime_get_sync(dev->dev); + if (WARN_ON(!fb)) + return; /* Configure the Burst Size and fifo threshold of DMA: */ reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770; @@ -330,7 +339,8 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16); break; default: - return -EINVAL; + dev_err(dev->dev, "invalid burst size\n"); + return; } reg |= (info->fifo_th << 8); tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg); @@ -344,9 +354,9 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, vsw = mode->vsync_end - mode->vsync_start; DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u", - mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw); + mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw); - /* Configure the AC Bias Period and Number of Transitions per Interrupt: */ + /* Set AC Bias Period and Number of Transitions per Interrupt: */ reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00; reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) | LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt); @@ -381,7 +391,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, /* * be sure to set Bit 10 for the V2 LCDC controller, * otherwise limited to 1024 pixels width, stopping - * 1920x1080 being suppoted. + * 1920x1080 being supported. 
*/ if (priv->rev == 2) { if ((mode->vdisplay - 1) & 0x400) { @@ -396,14 +406,15 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, /* Configure display type: */ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE | - LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000); + LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | + 0x000ff000 /* Palette Loading Delay bits */); reg |= LCDC_TFT_MODE; /* no monochrome/passive support */ if (info->tft_alt_mode) reg |= LCDC_TFT_ALT_ENABLE; if (priv->rev == 2) { unsigned int depth, bpp; - drm_fb_get_bpp_depth(crtc->primary->fb->pixel_format, &depth, &bpp); + drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp); switch (bpp) { case 16: break; @@ -415,7 +426,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, break; default: dev_err(dev->dev, "invalid pixel format\n"); - return -EINVAL; + return; } } reg |= info->fdd < 12; @@ -436,12 +447,7 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); - /* - * use value from adjusted_mode here as this might have been - * changed as part of the fixup for slave encoders to solve the - * issue where tilcdc timings are not VESA compliant - */ - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) + if (mode->flags & DRM_MODE_FLAG_NHSYNC) tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); else tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); @@ -456,51 +462,56 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc, else tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER); - drm_framebuffer_reference(crtc->primary->fb); + drm_framebuffer_reference(fb); - set_scanout(crtc, crtc->primary->fb); + set_scanout(crtc, fb); tilcdc_crtc_update_clk(crtc); - pm_runtime_put_sync(dev->dev); - - return 0; + crtc->hwmode = crtc->state->adjusted_mode; } -static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb) +static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - struct drm_device *dev = crtc->dev; - int r; - - r = tilcdc_verify_fb(crtc, crtc->primary->fb); - if (r) - return r; - - drm_framebuffer_reference(crtc->primary->fb); + struct drm_display_mode *mode = &state->mode; + int ret; - pm_runtime_get_sync(dev->dev); + /* If we are not active we don't care */ + if (!state->active) + return 0; - set_scanout(crtc, crtc->primary->fb); + if (state->state->planes[0].ptr != crtc->primary || + state->state->planes[0].state == NULL || + state->state->planes[0].state->crtc != crtc) { + dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); + return -EINVAL; + } - pm_runtime_put_sync(dev->dev); + ret = tilcdc_crtc_mode_valid(crtc, mode); + if (ret) { + dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name); + return -EINVAL; + } return 0; } static const struct drm_crtc_funcs tilcdc_crtc_funcs = { - .destroy = tilcdc_crtc_destroy, - .set_config = drm_crtc_helper_set_config, - .page_flip = tilcdc_crtc_page_flip, + .destroy = tilcdc_crtc_destroy, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = { - .dpms = tilcdc_crtc_dpms, .mode_fixup = tilcdc_crtc_mode_fixup, - .prepare = 
tilcdc_crtc_prepare, - .commit = tilcdc_crtc_commit, - .mode_set = tilcdc_crtc_mode_set, - .mode_set_base = tilcdc_crtc_mode_set_base, + .enable = tilcdc_crtc_enable, + .disable = tilcdc_crtc_disable, + .atomic_check = tilcdc_crtc_atomic_check, + .mode_set_nofb = tilcdc_crtc_mode_set_nofb, }; int tilcdc_crtc_max_width(struct drm_crtc *crtc) @@ -622,18 +633,15 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, void tilcdc_crtc_update_clk(struct drm_crtc *crtc) { - struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); struct drm_device *dev = crtc->dev; struct tilcdc_drm_private *priv = dev->dev_private; - int dpms = tilcdc_crtc->dpms; unsigned long lcd_clk; const unsigned clkdiv = 2; /* using a fixed divider of 2 */ int ret; pm_runtime_get_sync(dev->dev); - if (dpms == DRM_MODE_DPMS_ON) - tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + tilcdc_crtc_disable(crtc); /* mode.clock is in KHz, set_rate wants parameter in Hz */ ret = clk_set_rate(priv->clk, crtc->mode.clock * 1000 * clkdiv); @@ -657,8 +665,8 @@ void tilcdc_crtc_update_clk(struct drm_crtc *crtc) LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN | LCDC_V2_CORE_CLK_EN); - if (dpms == DRM_MODE_DPMS_ON) - tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + if (tilcdc_crtc_is_on(crtc)) + tilcdc_crtc_enable(crtc); out: pm_runtime_put_sync(dev->dev); @@ -718,30 +726,34 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) tilcdc_crtc->frame_intact = true; } + if (stat & LCDC_FIFO_UNDERFLOW) + dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow", + __func__, stat); + + /* For revision 2 only */ if (priv->rev == 2) { if (stat & LCDC_FRAME_DONE) { tilcdc_crtc->frame_done = true; wake_up(&tilcdc_crtc->frame_done_wq); } - tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0); - } - if (stat & LCDC_SYNC_LOST) { - dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", - __func__, stat); - tilcdc_crtc->frame_intact = false; - if (tilcdc_crtc->sync_lost_count++ > SYNC_LOST_COUNT_LIMIT) { - dev_err(dev->dev, - "%s(0x%08x): Sync lost flood detected, disabling the interrupt", - __func__, stat); - tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, - LCDC_SYNC_LOST); + if (stat & LCDC_SYNC_LOST) { + dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", + __func__, stat); + tilcdc_crtc->frame_intact = false; + if (tilcdc_crtc->sync_lost_count++ > + SYNC_LOST_COUNT_LIMIT) { + dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, disabling the interrupt", __func__, stat); + tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, + LCDC_SYNC_LOST); + } } - } - if (stat & LCDC_FIFO_UNDERFLOW) - dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underfow", - __func__, stat); + /* Indicate to LCDC that the interrupt service routine has + * completed, see 13.3.6.1.6 in AM335x TRM. 
+ */ + tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0); + } return IRQ_HANDLED; } @@ -761,7 +773,10 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev) crtc = &tilcdc_crtc->base; - tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; + ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary); + if (ret < 0) + goto fail; + init_waitqueue_head(&tilcdc_crtc->frame_done_wq); drm_flip_work_init(&tilcdc_crtc->unref_work, @@ -769,7 +784,11 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev) spin_lock_init(&tilcdc_crtc->irq_lock); - ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, + &tilcdc_crtc->primary, + NULL, + &tilcdc_crtc_funcs, + "tilcdc crtc"); if (ret < 0) goto fail; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index d27809372d54..f8892e9ad169 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -20,6 +20,8 @@ #include <linux/component.h> #include <linux/pinctrl/consumer.h> #include <linux/suspend.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include "tilcdc_drv.h" #include "tilcdc_regs.h" @@ -31,6 +33,20 @@ static LIST_HEAD(module_list); +static const u32 tilcdc_rev1_formats[] = { DRM_FORMAT_RGB565 }; + +static const u32 tilcdc_straight_formats[] = { DRM_FORMAT_RGB565, + DRM_FORMAT_BGR888, + DRM_FORMAT_XBGR8888 }; + +static const u32 tilcdc_crossed_formats[] = { DRM_FORMAT_BGR565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888 }; + +static const u32 tilcdc_legacy_formats[] = { DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, + DRM_FORMAT_XRGB8888 }; + void tilcdc_module_init(struct tilcdc_module *mod, const char *name, const struct tilcdc_module_ops *funcs) { @@ -59,9 +75,84 @@ static void tilcdc_fb_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(priv->fbdev); } +int tilcdc_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + + ret = drm_atomic_helper_check_planes(dev, state); + if (ret) + return ret; + + /* + * tilcdc ->atomic_check can update ->mode_changed if pixel format + * changes, hence will we check modeset changes again. + */ + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + + return ret; +} + +static int tilcdc_commit(struct drm_device *dev, + struct drm_atomic_state *state, + bool async) +{ + int ret; + + ret = drm_atomic_helper_prepare_planes(dev, state); + if (ret) + return ret; + + drm_atomic_helper_swap_state(state, true); + + /* + * Everything below can be run asynchronously without the need to grab + * any modeset locks at all under one condition: It must be guaranteed + * that the asynchronous work has either been cancelled (if the driver + * supports it, which at least requires that the framebuffers get + * cleaned up with drm_atomic_helper_cleanup_planes()) or completed + * before the new state gets committed on the software side with + * drm_atomic_helper_swap_state(). + * + * This scheme allows new atomic state updates to be prepared and + * checked in parallel to the asynchronous completion of the previous + * update. Which is important since compositors need to figure out the + * composition of the next frame right after having submitted the + * current layout. + */ + + /* Keep HW on while we commit the state. 
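The pm_runtime_get_sync()/put_sync() pair around the commit only needs to keep the controller powered while the registers are written; if the commit leaves the CRTC enabled, the CRTC's own enable path is expected to hold its own runtime-PM reference (the tilcdc disable path earlier in this diff drops one with pm_runtime_put_sync()). A minimal sketch of that ownership, with hypothetical example_ names rather than code from this patch:

#include <linux/pm_runtime.h>
#include <drm/drmP.h>

static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* Long-lived reference, held for as long as the CRTC scans out. */
	pm_runtime_get_sync(crtc->dev->dev);

	/* ... program and start the controller here ... */
}

static void example_crtc_disable(struct drm_crtc *crtc)
{
	/* ... stop the controller here ... */

	/* Balances the reference taken in example_crtc_enable(). */
	pm_runtime_put_sync(crtc->dev->dev);
}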
*/ + pm_runtime_get_sync(dev->dev); + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, 0); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + /* Now HW should remain on if needed because the crtc is enabled */ + pm_runtime_put_sync(dev->dev); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); + + drm_atomic_state_free(state); + + return 0; +} + static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = tilcdc_fb_create, .output_poll_changed = tilcdc_fb_output_poll_changed, + .atomic_check = tilcdc_atomic_check, + .atomic_commit = tilcdc_commit, }; static int modeset_init(struct drm_device *dev) @@ -112,7 +203,7 @@ static int tilcdc_unload(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; - tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF); + tilcdc_crtc_disable(priv->crtc); tilcdc_remove_external_encoders(dev); @@ -121,9 +212,7 @@ static int tilcdc_unload(struct drm_device *dev) drm_mode_config_cleanup(dev); drm_vblank_cleanup(dev); - pm_runtime_get_sync(dev->dev); drm_irq_uninstall(dev); - pm_runtime_put_sync(dev->dev); #ifdef CONFIG_CPU_FREQ cpufreq_unregister_notifier(&priv->freq_transition, @@ -146,24 +235,17 @@ static int tilcdc_unload(struct drm_device *dev) return 0; } -static size_t tilcdc_num_regs(void); - static int tilcdc_load(struct drm_device *dev, unsigned long flags) { struct platform_device *pdev = dev->platformdev; struct device_node *node = pdev->dev.of_node; struct tilcdc_drm_private *priv; - struct tilcdc_module *mod; struct resource *res; u32 bpp = 0; int ret; priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); - if (priv) - priv->saved_register = - devm_kcalloc(dev->dev, tilcdc_num_regs(), - sizeof(*priv->saved_register), GFP_KERNEL); - if (!priv || !priv->saved_register) { + if (!priv) { dev_err(dev->dev, "failed to allocate private data\n"); return -ENOMEM; } @@ -249,6 +331,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags) pm_runtime_put_sync(dev->dev); + if (priv->rev == 1) { + DBG("Revision 1 LCDC supports only RGB565 format"); + priv->pixelformats = tilcdc_rev1_formats; + priv->num_pixelformats = ARRAY_SIZE(tilcdc_rev1_formats); + bpp = 16; + } else { + const char *str = "\0"; + + of_property_read_string(node, "blue-and-red-wiring", &str); + if (0 == strcmp(str, "crossed")) { + DBG("Configured for crossed blue and red wires"); + priv->pixelformats = tilcdc_crossed_formats; + priv->num_pixelformats = + ARRAY_SIZE(tilcdc_crossed_formats); + bpp = 32; /* Choose bpp with RGB support for fbdev */ + } else if (0 == strcmp(str, "straight")) { + DBG("Configured for straight blue and red wires"); + priv->pixelformats = tilcdc_straight_formats; + priv->num_pixelformats = + ARRAY_SIZE(tilcdc_straight_formats); + bpp = 16; /* Choose bpp with RGB support for fbdev */ + } else { + DBG("Blue and red wiring '%s' unknown, use legacy mode", + str); + priv->pixelformats = tilcdc_legacy_formats; + priv->num_pixelformats = + ARRAY_SIZE(tilcdc_legacy_formats); + bpp = 16; /* This is just a guess */ + } + } + ret = modeset_init(dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); @@ -262,7 +375,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags) if (ret < 0) goto fail_mode_config_cleanup; - ret = tilcdc_add_external_encoders(dev, &bpp); + ret = tilcdc_add_external_encoders(dev); if (ret < 0) goto fail_component_cleanup; } @@ -279,22 +392,14 @@ static int 
tilcdc_load(struct drm_device *dev, unsigned long flags) goto fail_external_cleanup; } - pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0)); - pm_runtime_put_sync(dev->dev); if (ret < 0) { dev_err(dev->dev, "failed to install IRQ handler\n"); goto fail_vblank_cleanup; } - list_for_each_entry(mod, &module_list, list) { - DBG("%s: preferred_bpp: %d", mod->name, mod->preferred_bpp); - bpp = mod->preferred_bpp; - if (bpp > 0) - break; - } + drm_mode_config_reset(dev); - drm_helper_disable_unused_functions(dev); priv->fbdev = drm_fbdev_cma_init(dev, bpp, dev->mode_config.num_crtc, dev->mode_config.num_connector); @@ -308,9 +413,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags) return 0; fail_irq_uninstall: - pm_runtime_get_sync(dev->dev); drm_irq_uninstall(dev); - pm_runtime_put_sync(dev->dev); fail_vblank_cleanup: drm_vblank_cleanup(dev); @@ -361,45 +464,6 @@ static irqreturn_t tilcdc_irq(int irq, void *arg) return tilcdc_crtc_irq(priv->crtc); } -static void tilcdc_irq_preinstall(struct drm_device *dev) -{ - tilcdc_clear_irqstatus(dev, 0xffffffff); -} - -static int tilcdc_irq_postinstall(struct drm_device *dev) -{ - struct tilcdc_drm_private *priv = dev->dev_private; - - /* enable FIFO underflow irq: */ - if (priv->rev == 1) { - tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA); - } else { - tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, - LCDC_V2_UNDERFLOW_INT_ENA | - LCDC_V2_END_OF_FRAME0_INT_ENA | - LCDC_FRAME_DONE | LCDC_SYNC_LOST); - } - - return 0; -} - -static void tilcdc_irq_uninstall(struct drm_device *dev) -{ - struct tilcdc_drm_private *priv = dev->dev_private; - - /* disable irqs that we might have enabled: */ - if (priv->rev == 1) { - tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, - LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA); - tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA); - } else { - tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, - LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA | - LCDC_V2_END_OF_FRAME0_INT_ENA | - LCDC_FRAME_DONE | LCDC_SYNC_LOST); - } -} - static int tilcdc_enable_vblank(struct drm_device *dev, unsigned int pipe) { return 0; @@ -410,7 +474,7 @@ static void tilcdc_disable_vblank(struct drm_device *dev, unsigned int pipe) return; } -#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP) +#if defined(CONFIG_DEBUG_FS) static const struct { const char *name; uint8_t rev; @@ -441,15 +505,6 @@ static const struct { #undef REG }; -static size_t tilcdc_num_regs(void) -{ - return ARRAY_SIZE(registers); -} -#else -static size_t tilcdc_num_regs(void) -{ - return 0; -} #endif #ifdef CONFIG_DEBUG_FS @@ -537,14 +592,11 @@ static const struct file_operations fops = { static struct drm_driver tilcdc_driver = { .driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET | - DRIVER_PRIME), + DRIVER_PRIME | DRIVER_ATOMIC), .load = tilcdc_load, .unload = tilcdc_unload, .lastclose = tilcdc_lastclose, .irq_handler = tilcdc_irq, - .irq_preinstall = tilcdc_irq_preinstall, - .irq_postinstall = tilcdc_irq_postinstall, - .irq_uninstall = tilcdc_irq_uninstall, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = tilcdc_enable_vblank, .disable_vblank = tilcdc_disable_vblank, @@ -584,28 +636,12 @@ static int tilcdc_pm_suspend(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); struct tilcdc_drm_private *priv = ddev->dev_private; - unsigned i, n = 0; - drm_kms_helper_poll_disable(ddev); + priv->saved_state = drm_atomic_helper_suspend(ddev); /* Select sleep pin 
state */ pinctrl_pm_select_sleep_state(dev); - if (pm_runtime_suspended(dev)) { - priv->ctx_valid = false; - return 0; - } - - /* Disable the LCDC controller, to avoid locking up the PRCM */ - tilcdc_crtc_dpms(priv->crtc, DRM_MODE_DPMS_OFF); - - /* Save register state: */ - for (i = 0; i < ARRAY_SIZE(registers); i++) - if (registers[i].save && (priv->rev >= registers[i].rev)) - priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg); - - priv->ctx_valid = true; - return 0; } @@ -613,23 +649,15 @@ static int tilcdc_pm_resume(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); struct tilcdc_drm_private *priv = ddev->dev_private; - unsigned i, n = 0; + int ret = 0; /* Select default pin state */ pinctrl_pm_select_default_state(dev); - if (priv->ctx_valid == true) { - /* Restore register state: */ - for (i = 0; i < ARRAY_SIZE(registers); i++) - if (registers[i].save && - (priv->rev >= registers[i].rev)) - tilcdc_write(ddev, registers[i].reg, - priv->saved_register[n++]); - } - - drm_kms_helper_poll_enable(ddev); + if (priv->saved_state) + ret = drm_atomic_helper_resume(ddev, priv->saved_state); - return 0; + return ret; } #endif @@ -648,6 +676,12 @@ static int tilcdc_bind(struct device *dev) static void tilcdc_unbind(struct device *dev) { + struct drm_device *ddev = dev_get_drvdata(dev); + + /* Check if a subcomponent has already triggered the unloading. */ + if (!ddev->dev_private) + return; + drm_put_dev(dev_get_drvdata(dev)); } @@ -680,17 +714,15 @@ static int tilcdc_pdev_probe(struct platform_device *pdev) static int tilcdc_pdev_remove(struct platform_device *pdev) { - struct drm_device *ddev = dev_get_drvdata(&pdev->dev); - struct tilcdc_drm_private *priv = ddev->dev_private; - - /* Check if a subcomponent has already triggered the unloading. 
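The tilcdc_pm_suspend()/resume() hunks above replace the hand-rolled register save/restore with the atomic suspend/resume helpers. A minimal generic sketch of that pattern, with a hypothetical example_drm_private structure (not the tilcdc one) and an extra IS_ERR() check that the patch itself does not add:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_atomic_helper.h>

struct example_drm_private {
	struct drm_atomic_state *saved_state;
};

static int example_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct example_drm_private *priv = ddev->dev_private;

	/* Commits an all-off state and hands back the previous one. */
	priv->saved_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->saved_state))
		return PTR_ERR(priv->saved_state);

	return 0;
}

static int example_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct example_drm_private *priv = ddev->dev_private;

	/* Re-commits the state captured at suspend time. */
	return drm_atomic_helper_resume(ddev, priv->saved_state);
}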
*/ - if (!priv) - return 0; + int ret; - if (priv->is_componentized) - component_master_del(&pdev->dev, &tilcdc_comp_ops); - else + ret = tilcdc_get_external_components(&pdev->dev, NULL); + if (ret < 0) + return ret; + else if (ret == 0) drm_put_dev(platform_get_drvdata(pdev)); + else + component_master_del(&pdev->dev, &tilcdc_comp_ops); return 0; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h index c1de18bae415..a6e5e6d4970c 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h @@ -65,9 +65,12 @@ struct tilcdc_drm_private { */ uint32_t max_width; - /* register contents saved across suspend/resume: */ - u32 *saved_register; - bool ctx_valid; + /* Supported pixel formats */ + const uint32_t *pixelformats; + uint32_t num_pixelformats; + + /* The context for pm susped/resume cycle is stored here */ + struct drm_atomic_state *saved_state; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; @@ -113,7 +116,6 @@ struct tilcdc_module { const char *name; struct list_head list; const struct tilcdc_module_ops *funcs; - unsigned int preferred_bpp; }; void tilcdc_module_init(struct tilcdc_module *mod, const char *name, @@ -171,6 +173,11 @@ void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, bool simulate_vesa_sync); int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode); int tilcdc_crtc_max_width(struct drm_crtc *crtc); -void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode); +void tilcdc_crtc_disable(struct drm_crtc *crtc); +int tilcdc_crtc_update_fb(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); + +int tilcdc_plane_init(struct drm_device *dev, struct drm_plane *plane); #endif /* __TILCDC_DRV_H__ */ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 03acb4f99982..68e895021005 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -52,7 +52,7 @@ static int tilcdc_external_mode_valid(struct drm_connector *connector, return MODE_OK; } -static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp, +static int tilcdc_add_external_encoder(struct drm_device *dev, struct drm_connector *connector) { struct tilcdc_drm_private *priv = dev->dev_private; @@ -64,7 +64,6 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp, /* Only tda998x is supported at the moment. 
*/ tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true); tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x); - *bpp = panel_info_tda998x.bpp; connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs), GFP_KERNEL); @@ -94,7 +93,7 @@ static int tilcdc_add_external_encoder(struct drm_device *dev, int *bpp, return 0; } -int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp) +int tilcdc_add_external_encoders(struct drm_device *dev) { struct tilcdc_drm_private *priv = dev->dev_private; struct drm_connector *connector; @@ -108,7 +107,7 @@ int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp) if (connector == priv->connectors[i]) found = true; if (!found) { - ret = tilcdc_add_external_encoder(dev, bpp, connector); + ret = tilcdc_add_external_encoder(dev, connector); if (ret) return ret; } @@ -138,14 +137,23 @@ static int dev_match_of(struct device *dev, void *data) int tilcdc_get_external_components(struct device *dev, struct component_match **match) { + struct device_node *node; struct device_node *ep = NULL; int count = 0; - while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) { - struct device_node *node; + /* Avoid error print by of_graph_get_next_endpoint() if there + * is no ports present. + */ + node = of_get_child_by_name(dev->of_node, "ports"); + if (!node) + node = of_get_child_by_name(dev->of_node, "port"); + if (!node) + return 0; + of_node_put(node); + while ((ep = of_graph_get_next_endpoint(dev->of_node, ep))) { node = of_graph_get_remote_port_parent(ep); - if (!node && !of_device_is_available(node)) { + if (!node || !of_device_is_available(node)) { of_node_put(node); continue; } diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h index 6aabe2788760..c700e0c1623e 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h @@ -18,7 +18,7 @@ #ifndef __TILCDC_EXTERNAL_H__ #define __TILCDC_EXTERNAL_H__ -int tilcdc_add_external_encoders(struct drm_device *dev, int *bpp); +int tilcdc_add_external_encoders(struct drm_device *dev); void tilcdc_remove_external_encoders(struct drm_device *dev); int tilcdc_get_external_components(struct device *dev, struct component_match **match); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index ff7774c17d7c..7b3650901930 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -22,6 +22,7 @@ #include <video/display_timing.h> #include <video/of_display_timing.h> #include <video/videomode.h> +#include <drm/drm_atomic_helper.h> #include "tilcdc_drv.h" @@ -64,9 +65,7 @@ static void panel_encoder_dpms(struct drm_encoder *encoder, int mode) static void panel_encoder_prepare(struct drm_encoder *encoder) { - struct panel_encoder *panel_encoder = to_panel_encoder(encoder); panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info); } static void panel_encoder_commit(struct drm_encoder *encoder) @@ -196,9 +195,12 @@ static struct drm_encoder *panel_connector_best_encoder( static const struct drm_connector_funcs panel_connector_funcs = { .destroy = panel_connector_destroy, - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = panel_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = 
drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs panel_connector_helper_funcs = { @@ -268,6 +270,9 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) priv->encoders[priv->num_encoders++] = encoder; priv->connectors[priv->num_connectors++] = connector; + tilcdc_crtc_set_panel_info(priv->crtc, + to_panel_encoder(encoder)->mod->info); + return 0; } @@ -392,8 +397,6 @@ static int panel_probe(struct platform_device *pdev) goto fail_timings; } - mod->preferred_bpp = panel_mod->info->bpp; - return 0; fail_timings: diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c new file mode 100644 index 000000000000..74c65fa859b2 --- /dev/null +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2015 Texas Instruments + * Author: Jyri Sarha <jsarha@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <drm/drmP.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_atomic_helper.h> +#include <uapi/drm/drm_fourcc.h> + +#include "tilcdc_drv.h" + +static struct drm_plane_funcs tilcdc_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .set_property = drm_atomic_helper_plane_set_property, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int tilcdc_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state; + struct drm_plane_state *old_state = plane->state; + unsigned int depth, bpp; + + if (!state->crtc) + return 0; + + if (WARN_ON(!state->fb)) + return -EINVAL; + + if (state->crtc_x || state->crtc_y) { + dev_err(plane->dev->dev, "%s: crtc position must be zero.", + __func__); + return -EINVAL; + } + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, + state->crtc); + /* we should have a crtc state if the plane is attached to a crtc */ + if (WARN_ON(!crtc_state)) + return 0; + + if (crtc_state->mode.hdisplay != state->crtc_w || + crtc_state->mode.vdisplay != state->crtc_h) { + dev_err(plane->dev->dev, + "%s: Size must match mode (%dx%d == %dx%d)", __func__, + crtc_state->mode.hdisplay, crtc_state->mode.vdisplay, + state->crtc_w, state->crtc_h); + return -EINVAL; + } + + drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp); + if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) { + dev_err(plane->dev->dev, + "Invalid pitch: fb and crtc widths must be the same"); + return -EINVAL; + } + + if (state->fb && old_state->fb && + state->fb->pixel_format != old_state->fb->pixel_format) { + dev_dbg(plane->dev->dev, + "%s(): pixel format change requires mode_change\n", + __func__); + crtc_state->mode_changed = true; + } + + return 0; +} + +static void 
tilcdc_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_plane_state *state = plane->state; + + if (!state->crtc) + return; + + if (WARN_ON(!state->fb || !state->crtc->state)) + return; + + tilcdc_crtc_update_fb(state->crtc, + state->fb, + state->crtc->state->event); +} + +static const struct drm_plane_helper_funcs plane_helper_funcs = { + .atomic_check = tilcdc_plane_atomic_check, + .atomic_update = tilcdc_plane_atomic_update, +}; + +int tilcdc_plane_init(struct drm_device *dev, + struct drm_plane *plane) +{ + struct tilcdc_drm_private *priv = dev->dev_private; + int ret; + + ret = drm_plane_init(dev, plane, 1, + &tilcdc_plane_funcs, + priv->pixelformats, + priv->num_pixelformats, + true); + if (ret) { + dev_err(dev->dev, "Failed to initialize plane: %d\n", ret); + return ret; + } + + drm_plane_helper_add(plane, &plane_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h index 1bf5e2553acc..f57c0d62c76a 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h +++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h @@ -119,6 +119,20 @@ static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data) iowrite32(data, priv->mmio + reg); } +static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data) +{ + struct tilcdc_drm_private *priv = dev->dev_private; + volatile void __iomem *addr = priv->mmio + reg; + +#ifdef iowrite64 + iowrite64(data, addr); +#else + __iowmb(); + /* This compiles to strd (=64-bit write) on ARM7 */ + *(volatile u64 __force *)addr = __cpu_to_le64(data); +#endif +} + static inline u32 tilcdc_read(struct drm_device *dev, u32 reg) { struct tilcdc_drm_private *priv = dev->dev_private; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 6b8c5b3bf588..c6a70da6473d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -20,6 +20,7 @@ #include <linux/of_gpio.h> #include <linux/pinctrl/pinmux.h> #include <linux/pinctrl/consumer.h> +#include <drm/drm_atomic_helper.h> #include "tilcdc_drv.h" @@ -75,7 +76,6 @@ static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode) static void tfp410_encoder_prepare(struct drm_encoder *encoder) { tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); - tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info); } static void tfp410_encoder_commit(struct drm_encoder *encoder) @@ -201,9 +201,12 @@ static struct drm_encoder *tfp410_connector_best_encoder( static const struct drm_connector_funcs tfp410_connector_funcs = { .destroy = tfp410_connector_destroy, - .dpms = drm_helper_connector_dpms, + .dpms = drm_atomic_helper_connector_dpms, .detect = tfp410_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = { @@ -276,6 +279,7 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev priv->encoders[priv->num_encoders++] = encoder; priv->connectors[priv->num_connectors++] = connector; + tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info); return 0; } @@ -323,8 +327,6 @@ static int tfp410_probe(struct platform_device *pdev) goto fail; } - mod->preferred_bpp = dvi_info.bpp; - i2c_node = of_find_node_by_phandle(i2c_phandle); if 
(!i2c_node) { dev_err(&pdev->dev, "could not get i2c bus node\n"); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4054d804fe06..c2a30bdc8a01 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -354,13 +354,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) - ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem); else if (bdev->driver->move) ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, interruptible, - no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 2df602a35f92..bf6e21655c57 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -45,14 +45,22 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo) } int ttm_bo_move_ttm(struct ttm_buffer_object *bo, - bool evict, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool interruptible, bool no_wait_gpu, + struct ttm_mem_reg *new_mem) { struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + + if (unlikely(ret != 0)) { + if (ret != -ERESTARTSYS) + pr_err("Failed to expire sync object before unbinding TTM\n"); + return ret; + } + ttm_tt_unbind(ttm); ttm_bo_free_old_node(bo); ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, @@ -321,8 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index a1803fbcc898..29855be96be0 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -600,3 +600,9 @@ size_t ttm_round_pot(size_t size) return 0; } EXPORT_SYMBOL(ttm_round_pot); + +uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob) +{ + return glob->zone_kernel->max_mem; +} +EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size); diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index 4709b54c204c..d2f57c52f7db 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -150,8 +150,5 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder) drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); - drm_object_attach_property(&connector->base, - dev->mode_config.dirty_info_property, - 1); return 0; } diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 17d34e0edbdd..f0851db6ba6c 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -16,6 +16,20 @@ static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m) return 0; } +static int udl_usb_suspend(struct usb_interface *interface, + pm_message_t message) +{ + return 0; +} + +static int udl_usb_resume(struct usb_interface *interface) +{ + struct drm_device *dev = usb_get_intfdata(interface); + + udl_modeset_restore(dev); + return 0; +} + static 
const struct vm_operations_struct udl_gem_vm_ops = { .fault = udl_gem_fault, .open = drm_gem_vm_open, @@ -122,6 +136,8 @@ static struct usb_driver udl_driver = { .name = "udl", .probe = udl_usb_probe, .disconnect = udl_usb_disconnect, + .suspend = udl_usb_suspend, + .resume = udl_usb_resume, .id_table = id_table, }; module_usb_driver(udl_driver); diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index 0b03d34ffdee..f338a576efc8 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -52,6 +52,7 @@ struct udl_device { struct device *dev; struct drm_device *ddev; struct usb_device *udev; + struct drm_crtc *crtc; int sku_pixel_limit; @@ -87,6 +88,7 @@ struct udl_framebuffer { /* modeset */ int udl_modeset_init(struct drm_device *dev); +void udl_modeset_restore(struct drm_device *dev); void udl_modeset_cleanup(struct drm_device *dev); int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index d5df555aeba0..9688bfa92ccd 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -203,6 +203,7 @@ static int udl_fb_open(struct fb_info *info, int user) ufbdev->fb_count++; +#ifdef CONFIG_DRM_FBDEV_EMULATION if (fb_defio && (info->fbdefio == NULL)) { /* enable defio at last moment if not disabled by client */ @@ -218,6 +219,7 @@ static int udl_fb_open(struct fb_info *info, int user) info->fbdefio = fbdefio; fb_deferred_io_init(info); } +#endif pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n", info->node, user, info, ufbdev->fb_count); @@ -235,12 +237,14 @@ static int udl_fb_release(struct fb_info *info, int user) ufbdev->fb_count--; +#ifdef CONFIG_DRM_FBDEV_EMULATION if ((ufbdev->fb_count == 0) && (info->fbdefio)) { fb_deferred_io_cleanup(info); kfree(info->fbdefio); info->fbdefio = NULL; info->fbops->fb_mmap = udl_fb_mmap; } +#endif pr_warn("released /dev/fb%d user=%d count=%d\n", info->node, user, ufbdev->fb_count); diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 33dbfb2c4748..29f0207fa677 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -16,6 +16,8 @@ /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */ #define BULK_SIZE 512 +#define NR_USB_REQUEST_CHANNEL 0x12 + #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE) #define WRITES_IN_FLIGHT (4) #define MAX_VENDOR_DESCRIPTOR_SIZE 256 @@ -90,6 +92,26 @@ success: return true; } +/* + * Need to ensure a channel is selected before submitting URBs + */ +static int udl_select_std_channel(struct udl_device *udl) +{ + int ret; + u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, + 0x1C, 0x88, 0x5E, 0x15, + 0x60, 0xFE, 0xC6, 0x97, + 0x16, 0x3D, 0x47, 0xF2}; + + ret = usb_control_msg(udl->udev, + usb_sndctrlpipe(udl->udev, 0), + NR_USB_REQUEST_CHANNEL, + (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, + set_def_chn, sizeof(set_def_chn), + USB_CTRL_SET_TIMEOUT); + return ret < 0 ? 
ret : 0; +} + static void udl_release_urb_work(struct work_struct *work) { struct urb_node *unode = container_of(work, struct urb_node, @@ -301,6 +323,9 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) goto err; } + if (udl_select_std_channel(udl)) + DRM_ERROR("Selecting channel failed\n"); + if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { DRM_ERROR("udl_alloc_urb_list failed\n"); goto err; diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index f92ea9579674..f2b2481cad52 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -309,6 +309,8 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, char *wrptr; int color_depth = 0; + udl->crtc = crtc; + buf = (char *)udl->mode_buf; /* for now we just clip 24 -> 16 - if we fix that fix this */ @@ -441,8 +443,6 @@ int udl_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &udl_mode_funcs; - drm_mode_create_dirty_info_property(dev); - udl_crtc_init(dev); encoder = udl_encoder_init(dev); @@ -452,6 +452,18 @@ int udl_modeset_init(struct drm_device *dev) return 0; } +void udl_modeset_restore(struct drm_device *dev) +{ + struct udl_device *udl = dev->dev_private; + struct udl_framebuffer *ufb; + + if (!udl->crtc || !udl->crtc->primary->fb) + return; + udl_crtc_commit(udl->crtc); + ufb = to_udl_fb(udl->crtc->primary->fb); + udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height); +} + void udl_modeset_cleanup(struct drm_device *dev) { drm_mode_config_cleanup(dev); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 8fc2b731b59a..2682f07d8f1e 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -163,14 +163,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, int vblank_lines; int ret = 0; - /* - * XXX Doesn't work well in interlaced mode yet, partially due - * to problems in vc4 kms or drm core interlaced mode handling, - * so disable for now in interlaced mode. - */ - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - return ret; - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ /* Get optional system timestamp before query. */ @@ -191,10 +183,15 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, /* Vertical position of hvs composed scanline. */ *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE); + *hpos = 0; - /* No hpos info available. */ - if (hpos) - *hpos = 0; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) { + *vpos /= 2; + + /* Use hpos to correct for field offset in interlaced mode. */ + if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2) + *hpos += mode->crtc_htotal / 2; + } /* This is the offset we need for translating hvs -> pv scanout pos. */ fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay; @@ -217,8 +214,6 @@ int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id, * position of the PV. */ *vpos -= fifo_lines + 1; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - *vpos /= 2; ret |= DRM_SCANOUTPOS_ACCURATE; return ret; @@ -480,6 +475,9 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) int ret; require_hvs_enabled(dev); + /* Disable vblank irq handling before crtc is disabled. 
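The vc4_crtc_disable()/vc4_crtc_enable() hunks in this region bracket the hardware on/off sequence with vblank bookkeeping: events are turned off before scanout stops and turned back on only once scanout runs again, so nothing waits on a stopped CRTC and timestamps stay valid. A minimal sketch of that ordering, with placeholder example_ names rather than vc4 code:

static void example_crtc_disable(struct drm_crtc *crtc)
{
	/* Flush or reject pending vblank events before scanout stops. */
	drm_crtc_vblank_off(crtc);

	/* ... stop the scanout hardware here ... */
}

static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* ... start the scanout hardware here ... */

	/* Only now re-enable vblank handling, so timestamps are valid. */
	drm_crtc_vblank_on(crtc);
}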
*/ + drm_crtc_vblank_off(crtc); + CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN); ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1); @@ -530,6 +528,33 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) /* Turn on the pixel valve, which will emit the vstart signal. */ CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); + + /* Enable vblank irq handling after crtc is started. */ + drm_crtc_vblank_on(crtc); +} + +static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + /* Do not allow doublescan modes from user space */ + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) { + DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n", + crtc->base.id); + return false; + } + + /* + * Interlaced video modes got CRTC_INTERLACE_HALVE_V applied when + * coming from user space. We don't want this, as it screws up + * vblank timestamping, so fix it up. + */ + drm_mode_set_crtcinfo(adjusted_mode, 0); + + DRM_DEBUG_KMS("[CRTC:%d] adjusted_mode :\n", crtc->base.id); + drm_mode_debug_printmodeline(adjusted_mode); + + return true; } static int vc4_crtc_atomic_check(struct drm_crtc *crtc, @@ -819,6 +844,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_set_nofb = vc4_crtc_mode_set_nofb, .disable = vc4_crtc_disable, .enable = vc4_crtc_enable, + .mode_fixup = vc4_crtc_mode_fixup, .atomic_check = vc4_crtc_atomic_check, .atomic_flush = vc4_crtc_atomic_flush, }; diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 275fedbdbd9e..1e1f6b8184d0 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -340,9 +340,20 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder) } } +static bool vc4_dpi_encoder_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + return false; + + return true; +} + static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = { .disable = vc4_dpi_encoder_disable, .enable = vc4_dpi_encoder_enable, + .mode_fixup = vc4_dpi_encoder_mode_fixup, }; static const struct of_device_id vc4_dpi_dt_match[] = { diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8b42d31a7f0e..deec53545bea 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "drm_fb_cma_helper.h" +#include <drm/drm_fb_helper.h> #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" @@ -214,7 +215,7 @@ static void vc4_kick_out_firmware_fb(void) ap->ranges[0].base = 0; ap->ranges[0].size = ~0; - remove_conflicting_framebuffers(ap, "vc4drmfb", false); + drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false); kfree(ap); } diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 6155e8aca1c6..27c52ec35193 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -419,10 +419,6 @@ again: vc4_flush_caches(dev); - /* Disable the binner's pre-loaded overflow memory address */ - V3D_WRITE(V3D_BPOA, 0); - V3D_WRITE(V3D_BPOS, 0); - /* Either put the job in the binner if it uses the binner, or * immediately move it to the to-be-rendered queue. 
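The vc4 mode_fixup hunk above calls drm_mode_set_crtcinfo(adjusted_mode, 0) because the probe helpers apply CRTC_INTERLACE_HALVE_V, which halves the crtc_* vertical timings of interlaced modes to per-field values and throws off vblank timestamping. A small illustration with a hypothetical helper, not part of the patch:

static void example_show_halve_v(struct drm_display_mode *mode)
{
	/* Assume an interlaced 1920x1080 mode: vdisplay == 1080 and
	 * DRM_MODE_FLAG_INTERLACE set in mode->flags. */

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
	/* mode->crtc_vdisplay is now 540: per-field timings. */

	drm_mode_set_crtcinfo(mode, 0);
	/* mode->crtc_vdisplay is back to 1080: full-frame timings,
	 * which is what vc4 needs for consistent vblank timestamps. */
}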
*/ diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 4452f3631cac..68ad10634b29 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -208,10 +208,35 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } +/* + * drm_helper_probe_single_connector_modes() applies drm_mode_set_crtcinfo to + * all modes with flag CRTC_INTERLACE_HALVE_V. We don't want this, as it + * screws up vblank timestamping for interlaced modes, so fix it up. + */ +static int vc4_hdmi_connector_probe_modes(struct drm_connector *connector, + uint32_t maxX, uint32_t maxY) +{ + struct drm_display_mode *mode; + int count; + + count = drm_helper_probe_single_connector_modes(connector, maxX, maxY); + if (count == 0) + return 0; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed adapted modes :\n", + connector->base.id, connector->name); + list_for_each_entry(mode, &connector->modes, head) { + drm_mode_set_crtcinfo(mode, 0); + drm_mode_debug_printmodeline(mode); + } + + return count; +} + static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_hdmi_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, + .fill_modes = vc4_hdmi_connector_probe_modes, .destroy = vc4_hdmi_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -246,7 +271,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, connector->polled = (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT); - connector->interlace_allowed = 0; + connector->interlace_allowed = 1; connector->doublescan_allowed = 0; drm_mode_connector_attach_encoder(connector, encoder); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 4ac894d993cd..c1f65c6c8e60 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -44,7 +44,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c) drm_atomic_helper_commit_modeset_disables(dev, state); - drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 29e4b400e25e..881bf489478b 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -735,8 +735,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) } static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { - .prepare_fb = NULL, - .cleanup_fb = NULL, .atomic_check = vc4_plane_atomic_check, .atomic_update = vc4_plane_atomic_update, }; diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index ed8aa8ff861a..e5582bab7e3c 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -72,7 +72,7 @@ static const struct file_operations via_driver_fops = { static struct drm_driver driver = { .driver_features = - DRIVER_USE_AGP | DRIVER_HAVE_IRQ | + DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY | DRIVER_IRQ_SHARED, .load = via_driver_load, .unload = via_driver_unload, diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 4e192aa2d021..7cf3678623c3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -338,7 +338,8 @@ static void vgdev_atomic_commit_tail(struct 
drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); - drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_planes(dev, state, + DRM_PLANE_COMMIT_ACTIVE_ONLY); drm_atomic_helper_commit_hw_done(state); diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c index 7f0e93f87a55..a59d0e309bfc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c +++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c @@ -24,6 +24,7 @@ */ #include <linux/pci.h> +#include <drm/drm_fb_helper.h> #include "virtgpu_drv.h" @@ -42,7 +43,7 @@ static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) primary = pci_dev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - remove_conflicting_framebuffers(ap, "virtiodrmfb", primary); + drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary); kfree(ap); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index c046903cb47b..512e7cddcbae 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -152,15 +152,10 @@ static int virtio_gpu_execbuffer(struct drm_device *dev, if (ret) goto out_free; - buf = kmalloc(exbuf->size, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto out_unresv; - } - if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command, - exbuf->size)) { - kfree(buf); - ret = -EFAULT; + buf = memdup_user((void __user *)(uintptr_t)exbuf->command, + exbuf->size); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); goto out_unresv; } virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index b49445df8a7e..fb7b82aad763 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -6,6 +6,7 @@ config DRM_VMWGFX select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select DRM_TTM + select FB # Only needed for the transitional use of drm_crtc_init - can be removed # again once vmwgfx sets up the primary plane itself. 
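The virtio_gpu_execbuffer() hunk above swaps an open-coded kmalloc()+copy_from_user() pair for memdup_user(), which does both in one call and reports failure through ERR_PTR() instead of NULL. A minimal sketch of the idiom, with placeholder example_ names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>	/* memdup_user() */

static int example_submit(const void __user *ucmds, size_t size)
{
	void *buf;

	buf = memdup_user(ucmds, size);	/* kmalloc() + copy_from_user() */
	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... hand buf to the command queue here ... */

	kfree(buf);
	return 0;
}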
select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 63ccd9871ec9..23ec673d5e16 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -377,9 +377,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); drm_object_attach_property(&connector->base, - dev->mode_config.dirty_info_property, - 1); - drm_object_attach_property(&connector->base, dev_priv->hotplug_mode_update_property, 1); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); @@ -421,10 +418,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) if (ret != 0) goto err_free; - ret = drm_mode_create_dirty_info_property(dev); - if (ret != 0) - goto err_vblank_cleanup; - vmw_kms_create_implicit_placement_property(dev_priv, true); if (dev_priv->capabilities & SVGA_CAP_MULTIMON) @@ -439,8 +432,6 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) return 0; -err_vblank_cleanup: - drm_vblank_cleanup(dev); err_free: kfree(dev_priv->ldu_priv); dev_priv->ldu_priv = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index b74eae2b8594..f42359084adc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -538,9 +538,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); drm_object_attach_property(&connector->base, - dev->mode_config.dirty_info_property, - 1); - drm_object_attach_property(&connector->base, dev_priv->hotplug_mode_update_property, 1); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); @@ -574,10 +571,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) if (unlikely(ret != 0)) return ret; - ret = drm_mode_create_dirty_info_property(dev); - if (unlikely(ret != 0)) - goto err_vblank_cleanup; - vmw_kms_create_implicit_placement_property(dev_priv, false); for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) @@ -588,10 +581,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) DRM_INFO("Screen Objects Display Unit initialized\n"); return 0; - -err_vblank_cleanup: - drm_vblank_cleanup(dev); - return ret; } int vmw_kms_sou_close_display(struct vmw_private *dev_priv) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 41932a7c4f79..94ad8d2acf9a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1131,9 +1131,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) drm_mode_crtc_set_gamma_size(crtc, 256); drm_object_attach_property(&connector->base, - dev->mode_config.dirty_info_property, - 1); - drm_object_attach_property(&connector->base, dev_priv->hotplug_mode_update_property, 1); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); @@ -1202,10 +1199,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) if (unlikely(ret != 0)) return ret; - ret = drm_mode_create_dirty_info_property(dev); - if (unlikely(ret != 0)) - goto err_vblank_cleanup; - dev_priv->active_display_unit = vmw_du_screen_target; vmw_kms_create_implicit_placement_property(dev_priv, false); |
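On the plane-commit calls in the vc4_kms.c and virtgpu_display.c hunks above: the third argument of drm_atomic_helper_commit_planes() is now a flags word rather than a bool, with 0 committing every plane and DRM_PLANE_COMMIT_ACTIVE_ONLY (the value virtio switches to) restricting the commit to planes on active CRTCs. A minimal commit-tail sketch under that assumption, with a placeholder example_ name:

static void example_commit_tail(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	drm_atomic_helper_commit_modeset_disables(dev, state);

	/* 0 would touch every plane; this skips planes on inactive CRTCs. */
	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);
}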