Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c                    |  11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c             |  11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c          | 110
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c |  13
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h                |  80
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c                            |   4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h   | 100
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c          | 153
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c                        |   3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h             |  17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c                       |  28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h                       |   5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c               |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c               | 171
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c               |   1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c                |  86
16 files changed, 603 insertions(+), 196 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8dee52ce26d0..93cf73d6fa11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2188,15 +2188,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin) {
- /*
- * FIXME: The bounding box is still needed by Navi12, so
- * temporarily read it from gpu_info firmware. Should be dropped
- * when DAL no longer needs it.
- */
- if (adev->asic_type != CHIP_NAVI12)
- return 0;
- }
+ if (adev->mman.discovery_bin)
+ return 0;
switch (adev->asic_type) {
default:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c8c00c2a5224..4e82ee4d74ac 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6170,8 +6170,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-
- if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+ else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -6187,8 +6188,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ if (stream->link->psr_settings.psr_feature_enabled)
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -6914,8 +6916,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- if (!mst_state->pbn_div)
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index ec77b2b41ba3..d2271e308fa0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -440,7 +440,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
.use_urgent_burst_bw = 0
};
-struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 1069.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 1324.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 1670.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 2000.0,
+ },
+ },
+
+ .num_states = 5,
+ .sr_exit_time_us = 1.9,
+ .sr_enter_plus_exit_time_us = 4.4,
+ .urgent_latency_us = 3.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 16,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.5,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 45.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 50,
+ .use_urgent_burst_bw = 0,
+};
struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.odm_capable = 1,
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 84f9b412a4f1..738ee763f24a 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,12 +147,15 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
+ }
+
+ if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1c5049e894e3..c2ccf3724e37 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -318,6 +318,7 @@ enum pp_xgmi_plpd_mode {
#define MAX_GFX_CLKS 8
#define MAX_CLKS 4
#define NUM_VCN 4
+#define NUM_JPEG_ENG 32
struct seq_file;
enum amd_pp_clock_type;
@@ -774,6 +775,85 @@ struct gpu_metrics_v1_4 {
uint16_t padding;
};
+struct gpu_metrics_v1_5 {
+ struct metrics_table_header common_header;
+
+ /* Temperature (Celsius) */
+ uint16_t temperature_hotspot;
+ uint16_t temperature_mem;
+ uint16_t temperature_vrsoc;
+
+ /* Power (Watts) */
+ uint16_t curr_socket_power;
+
+ /* Utilization (%) */
+ uint16_t average_gfx_activity;
+ uint16_t average_umc_activity; // memory controller
+ uint16_t vcn_activity[NUM_VCN];
+ uint16_t jpeg_activity[NUM_JPEG_ENG];
+
+ /* Energy (15.259uJ (2^-16) units) */
+ uint64_t energy_accumulator;
+
+ /* Driver attached timestamp (in ns) */
+ uint64_t system_clock_counter;
+
+ /* Throttle status */
+ uint32_t throttle_status;
+
+ /* Clock Lock Status. Each bit corresponds to clock instance */
+ uint32_t gfxclk_lock_status;
+
+ /* Link width (number of lanes) and speed (in 0.1 GT/s) */
+ uint16_t pcie_link_width;
+ uint16_t pcie_link_speed;
+
+ /* XGMI bus width and bitrate (in Gbps) */
+ uint16_t xgmi_link_width;
+ uint16_t xgmi_link_speed;
+
+ /* Utilization Accumulated (%) */
+ uint32_t gfx_activity_acc;
+ uint32_t mem_activity_acc;
+
+ /*PCIE accumulated bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_acc;
+
+ /*PCIE instantaneous bandwidth (GB/sec) */
+ uint64_t pcie_bandwidth_inst;
+
+ /* PCIE L0 to recovery state transition accumulated count */
+ uint64_t pcie_l0_to_recov_count_acc;
+
+ /* PCIE replay accumulated count */
+ uint64_t pcie_replay_count_acc;
+
+ /* PCIE replay rollover accumulated count */
+ uint64_t pcie_replay_rover_count_acc;
+
+ /* PCIE NAK sent accumulated count */
+ uint32_t pcie_nak_sent_count_acc;
+
+ /* PCIE NAK received accumulated count */
+ uint32_t pcie_nak_rcvd_count_acc;
+
+ /* XGMI accumulated data transfer size(KiloBytes) */
+ uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS];
+ uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS];
+
+ /* PMFW attached timestamp (10ns resolution) */
+ uint64_t firmware_timestamp;
+
+ /* Current clocks (MHz) */
+ uint16_t current_gfxclk[MAX_GFX_CLKS];
+ uint16_t current_socclk[MAX_CLKS];
+ uint16_t current_vclk0[MAX_CLKS];
+ uint16_t current_dclk0[MAX_CLKS];
+ uint16_t current_uclk;
+
+ uint16_t padding;
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
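For context, gpu_metrics_v1_5 is the blob amdgpu hands back through its gpu_metrics query, so a consumer has to check the table header before touching the new vcn_activity[]/jpeg_activity[] fields. A rough userspace sketch, not part of this patch (the sysfs path and the error handling are assumptions; the header fields mirror metrics_table_header from this header file):

    /* Hypothetical reader: dump the gpu_metrics header and check for v1.5. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct metrics_table_header {
            uint16_t structure_size;
            uint8_t  format_revision;   /* 1 for the gpu_metrics_v1_x family */
            uint8_t  content_revision;  /* 5 for gpu_metrics_v1_5 */
    };

    int main(void)
    {
            /* Path is an assumption; the attribute sits in the card's device dir. */
            FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
            unsigned char buf[4096];
            struct metrics_table_header hdr;
            size_t n;

            if (!f)
                    return 1;
            n = fread(buf, 1, sizeof(buf), f);
            fclose(f);
            if (n < sizeof(hdr))
                    return 1;

            memcpy(&hdr, buf, sizeof(hdr));
            printf("gpu_metrics v%u.%u, %u bytes (read %zu)\n",
                   hdr.format_revision, hdr.content_revision,
                   hdr.structure_size, n);

            /* Only a v1.5 (or later) table carries the vcn/jpeg activity arrays. */
            if (hdr.format_revision != 1 || hdr.content_revision < 5)
                    fprintf(stderr, "no jpeg_activity[] in this table version\n");
            return 0;
    }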
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 49028dde0f87..20c53eefd680 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2128,7 +2128,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
+ if ((adev->flags & AMD_IS_APU &&
+ gc_ver != IP_VERSION(9, 4, 3)) ||
+ gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index fef2d290f3f2..7b812b9994d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x9
+#define SMU_METRICS_TABLE_VERSION 0xB
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -219,7 +219,103 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
-} MetricsTable_t;
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableX_t;
+
+typedef struct __attribute__((packed, aligned(4))) {
+ uint32_t AccumulationCounter;
+
+ //TEMPERATURE
+ uint32_t MaxSocketTemperature;
+ uint32_t MaxVrTemperature;
+ uint32_t MaxHbmTemperature;
+ uint64_t MaxSocketTemperatureAcc;
+ uint64_t MaxVrTemperatureAcc;
+ uint64_t MaxHbmTemperatureAcc;
+
+ //POWER
+ uint32_t SocketPowerLimit;
+ uint32_t MaxSocketPowerLimit;
+ uint32_t SocketPower;
+
+ //ENERGY
+ uint64_t Timestamp;
+ uint64_t SocketEnergyAcc;
+ uint64_t CcdEnergyAcc;
+ uint64_t XcdEnergyAcc;
+ uint64_t AidEnergyAcc;
+ uint64_t HbmEnergyAcc;
+
+ //FREQUENCY
+ uint32_t CclkFrequencyLimit;
+ uint32_t GfxclkFrequencyLimit;
+ uint32_t FclkFrequency;
+ uint32_t UclkFrequency;
+ uint32_t SocclkFrequency[4];
+ uint32_t VclkFrequency[4];
+ uint32_t DclkFrequency[4];
+ uint32_t LclkFrequency[4];
+ uint64_t GfxclkFrequencyAcc[8];
+ uint64_t CclkFrequencyAcc[96];
+
+ //FREQUENCY RANGE
+ uint32_t MaxCclkFrequency;
+ uint32_t MinCclkFrequency;
+ uint32_t MaxGfxclkFrequency;
+ uint32_t MinGfxclkFrequency;
+ uint32_t FclkFrequencyTable[4];
+ uint32_t UclkFrequencyTable[4];
+ uint32_t SocclkFrequencyTable[4];
+ uint32_t VclkFrequencyTable[4];
+ uint32_t DclkFrequencyTable[4];
+ uint32_t LclkFrequencyTable[4];
+ uint32_t MaxLclkDpmRange;
+ uint32_t MinLclkDpmRange;
+
+ //XGMI
+ uint32_t XgmiWidth;
+ uint32_t XgmiBitrate;
+ uint64_t XgmiReadBandwidthAcc[8];
+ uint64_t XgmiWriteBandwidthAcc[8];
+
+ //ACTIVITY
+ uint32_t SocketC0Residency;
+ uint32_t SocketGfxBusy;
+ uint32_t DramBandwidthUtilization;
+ uint64_t SocketC0ResidencyAcc;
+ uint64_t SocketGfxBusyAcc;
+ uint64_t DramBandwidthAcc;
+ uint32_t MaxDramBandwidth;
+ uint64_t DramBandwidthUtilizationAcc;
+ uint64_t PcieBandwidthAcc[4];
+
+ //THROTTLERS
+ uint32_t ProchotResidencyAcc;
+ uint32_t PptResidencyAcc;
+ uint32_t SocketThmResidencyAcc;
+ uint32_t VrThmResidencyAcc;
+ uint32_t HbmThmResidencyAcc;
+ uint32_t GfxLockXCDMak;
+
+ // New Items at end to maintain driver compatibility
+ uint32_t GfxclkFrequency[8];
+
+ //PSNs
+ uint64_t PublicSerialNumber_AID[4];
+ uint64_t PublicSerialNumber_XCD[8];
+ uint64_t PublicSerialNumber_CCD[12];
+
+ //XGMI Data transfer size
+ uint64_t XgmiReadDataSizeAcc[8];//in KByte
+ uint64_t XgmiWriteDataSizeAcc[8];//in KByte
+
+ // VCN/JPEG ACTIVITY
+ uint32_t VcnBusy[4];
+ uint32_t JpegBusy[32];
+} MetricsTableA_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 900a2d9e6d85..b64e07b75937 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -245,6 +245,8 @@ struct PPTable_t {
#define SMUQ10_TO_UINT(x) ((x) >> 10)
#define SMUQ10_FRAC(x) ((x) & 0x3ff)
#define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))
+#define GET_METRIC_FIELD(field) ((adev->flags & AMD_IS_APU) ?\
+ (metrics_a->field) : (metrics_x->field))
struct smu_v13_0_6_dpm_map {
enum smu_clk_type clk_type;
@@ -327,7 +329,8 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS,
+ max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)),
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
@@ -335,12 +338,13 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(max(sizeof(MetricsTableX_t),
+ sizeof(MetricsTableA_t)), GFP_KERNEL);
if (!smu_table->metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_4);
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_5);
smu_table->gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
if (!smu_table->gpu_metrics_table) {
@@ -431,9 +435,11 @@ static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
+ struct amdgpu_device *adev = smu->adev;
int ret, i, retry = 100;
/* Store one-time values in driver PPTable */
@@ -444,7 +450,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return ret;
/* Ensure that metrics have been updated */
- if (metrics->AccumulationCounter)
+ if (GET_METRIC_FIELD(AccumulationCounter))
break;
usleep_range(1000, 1100);
@@ -454,29 +460,29 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
return -ETIME;
pptable->MaxSocketPowerLimit =
- SMUQ10_ROUND(metrics->MaxSocketPowerLimit);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketPowerLimit));
pptable->MaxGfxclkFrequency =
- SMUQ10_ROUND(metrics->MaxGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency));
pptable->MinGfxclkFrequency =
- SMUQ10_ROUND(metrics->MinGfxclkFrequency);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency));
for (i = 0; i < 4; ++i) {
pptable->FclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->FclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequencyTable)[i]);
pptable->UclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->UclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequencyTable)[i]);
pptable->SocclkFrequencyTable[i] = SMUQ10_ROUND(
- metrics->SocclkFrequencyTable[i]);
+ GET_METRIC_FIELD(SocclkFrequencyTable)[i]);
pptable->VclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->VclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequencyTable)[i]);
pptable->DclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->DclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequencyTable)[i]);
pptable->LclkFrequencyTable[i] =
- SMUQ10_ROUND(metrics->LclkFrequencyTable[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(LclkFrequencyTable)[i]);
}
/* use AID0 serial number by default */
- pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
+ pptable->PublicSerialNumber_AID = GET_METRIC_FIELD(PublicSerialNumber_AID)[0];
pptable->Init = true;
}
@@ -778,7 +784,8 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
- MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
+ MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
+ MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
int xcc_id;
@@ -793,50 +800,50 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_GFXCLK:
if (smu->smc_fw_version >= 0x552F00) {
xcc_id = GET_INST(GC, 0);
- *value = SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
} else {
*value = 0;
}
break;
case METRICS_CURR_SOCCLK:
case METRICS_AVERAGE_SOCCLK:
- *value = SMUQ10_ROUND(metrics->SocclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[0]);
break;
case METRICS_CURR_UCLK:
case METRICS_AVERAGE_UCLK:
- *value = SMUQ10_ROUND(metrics->UclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
break;
case METRICS_CURR_VCLK:
- *value = SMUQ10_ROUND(metrics->VclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[0]);
break;
case METRICS_CURR_DCLK:
- *value = SMUQ10_ROUND(metrics->DclkFrequency[0]);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[0]);
break;
case METRICS_CURR_FCLK:
- *value = SMUQ10_ROUND(metrics->FclkFrequency);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(FclkFrequency));
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = SMUQ10_ROUND(metrics->SocketGfxBusy);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
break;
case METRICS_CURR_SOCKETPOWER:
- *value = SMUQ10_ROUND(metrics->SocketPower) << 8;
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower)) << 8;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = SMUQ10_ROUND(metrics->MaxSocketTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = SMUQ10_ROUND(metrics->MaxHbmTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
/* This is the max of all VRs and not just SOC VR.
* No need to define another data type for the same.
*/
case METRICS_TEMPERATURE_VRSOC:
- *value = SMUQ10_ROUND(metrics->MaxVrTemperature) *
+ *value = SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature)) *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
default:
@@ -2022,67 +2029,70 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_4 *gpu_metrics =
- (struct gpu_metrics_v1_4 *)smu_table->gpu_metrics_table;
+ struct gpu_metrics_v1_5 *gpu_metrics =
+ (struct gpu_metrics_v1_5 *)smu_table->gpu_metrics_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i;
- MetricsTable_t *metrics;
+ int ret = 0, xcc_id, inst, i, j;
+ MetricsTableX_t *metrics_x;
+ MetricsTableA_t *metrics_a;
u16 link_width_level;
- metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
- ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
+ metrics_x = kzalloc(max(sizeof(MetricsTableX_t), sizeof(MetricsTableA_t)), GFP_KERNEL);
+ ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
if (ret) {
- kfree(metrics);
+ kfree(metrics_x);
return ret;
}
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 4);
+ metrics_a = (MetricsTableA_t *)metrics_x;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 5);
gpu_metrics->temperature_hotspot =
- SMUQ10_ROUND(metrics->MaxSocketTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature));
/* Individual HBM stack temperature is not reported */
gpu_metrics->temperature_mem =
- SMUQ10_ROUND(metrics->MaxHbmTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxHbmTemperature));
/* Reports max temperature of all voltage rails */
gpu_metrics->temperature_vrsoc =
- SMUQ10_ROUND(metrics->MaxVrTemperature);
+ SMUQ10_ROUND(GET_METRIC_FIELD(MaxVrTemperature));
gpu_metrics->average_gfx_activity =
- SMUQ10_ROUND(metrics->SocketGfxBusy);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusy));
gpu_metrics->average_umc_activity =
- SMUQ10_ROUND(metrics->DramBandwidthUtilization);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilization));
gpu_metrics->curr_socket_power =
- SMUQ10_ROUND(metrics->SocketPower);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketPower));
/* Energy counter reported in 15.259uJ (2^-16) units */
- gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
+ gpu_metrics->energy_accumulator = GET_METRIC_FIELD(SocketEnergyAcc);
for (i = 0; i < MAX_GFX_CLKS; i++) {
xcc_id = GET_INST(GC, i);
if (xcc_id >= 0)
gpu_metrics->current_gfxclk[i] =
- SMUQ10_ROUND(metrics->GfxclkFrequency[xcc_id]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency)[xcc_id]);
if (i < MAX_CLKS) {
gpu_metrics->current_socclk[i] =
- SMUQ10_ROUND(metrics->SocclkFrequency[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocclkFrequency)[i]);
inst = GET_INST(VCN, i);
if (inst >= 0) {
gpu_metrics->current_vclk0[i] =
- SMUQ10_ROUND(metrics->VclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(VclkFrequency)[inst]);
gpu_metrics->current_dclk0[i] =
- SMUQ10_ROUND(metrics->DclkFrequency[inst]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DclkFrequency)[inst]);
}
}
}
- gpu_metrics->current_uclk = SMUQ10_ROUND(metrics->UclkFrequency);
+ gpu_metrics->current_uclk = SMUQ10_ROUND(GET_METRIC_FIELD(UclkFrequency));
/* Throttle status is not reported through metrics now */
gpu_metrics->throttle_status = 0;
/* Clock Lock Status. Each bit corresponds to each GFXCLK instance */
- gpu_metrics->gfxclk_lock_status = metrics->GfxLockXCDMak >> GET_INST(GC, 0);
+ gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0);
if (!(adev->flags & AMD_IS_APU)) {
link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
@@ -2094,38 +2104,57 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->pcie_link_speed =
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
- SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
- SMUQ10_ROUND(metrics->PcieBandwidth[0]);
+ SMUQ10_ROUND(metrics_x->PcieBandwidth[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
- metrics->PCIeL0ToRecoveryCountAcc;
+ metrics_x->PCIeL0ToRecoveryCountAcc;
gpu_metrics->pcie_replay_count_acc =
- metrics->PCIenReplayAAcc;
+ metrics_x->PCIenReplayAAcc;
gpu_metrics->pcie_replay_rover_count_acc =
- metrics->PCIenReplayARolloverCountAcc;
+ metrics_x->PCIenReplayARolloverCountAcc;
+ gpu_metrics->pcie_nak_sent_count_acc =
+ metrics_x->PCIeNAKSentCountAcc;
+ gpu_metrics->pcie_nak_rcvd_count_acc =
+ metrics_x->PCIeNAKReceivedCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->gfx_activity_acc =
- SMUQ10_ROUND(metrics->SocketGfxBusyAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(SocketGfxBusyAcc));
gpu_metrics->mem_activity_acc =
- SMUQ10_ROUND(metrics->DramBandwidthUtilizationAcc);
+ SMUQ10_ROUND(GET_METRIC_FIELD(DramBandwidthUtilizationAcc));
for (i = 0; i < NUM_XGMI_LINKS; i++) {
gpu_metrics->xgmi_read_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiReadDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiReadDataSizeAcc)[i]);
gpu_metrics->xgmi_write_data_acc[i] =
- SMUQ10_ROUND(metrics->XgmiWriteDataSizeAcc[i]);
+ SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWriteDataSizeAcc)[i]);
+ }
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ gpu_metrics->jpeg_activity[(i * adev->jpeg.num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy)
+ [(inst * adev->jpeg.num_jpeg_rings) + j]);
+ }
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_activity[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy)[inst]);
}
- gpu_metrics->xgmi_link_width = SMUQ10_ROUND(metrics->XgmiWidth);
- gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(metrics->XgmiBitrate);
+ gpu_metrics->xgmi_link_width = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiWidth));
+ gpu_metrics->xgmi_link_speed = SMUQ10_ROUND(GET_METRIC_FIELD(XgmiBitrate));
- gpu_metrics->firmware_timestamp = metrics->Timestamp;
+ gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp);
*table = (void *)gpu_metrics;
- kfree(metrics);
+ kfree(metrics_x);
return sizeof(*gpu_metrics);
}
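The conversions above all go through the SMUQ10_* helpers, which treat PMFW values as unsigned Q10 fixed point (10 fractional bits) and round to nearest, while GET_METRIC_FIELD() merely picks the APU (MetricsTableA_t) or dGPU (MetricsTableX_t) layout. A standalone sketch of the rounding, with the macros copied from this file and a made-up sample value:

    #include <stdint.h>
    #include <stdio.h>

    /* Same definitions as in smu_v13_0_6_ppt.c. */
    #define SMUQ10_TO_UINT(x) ((x) >> 10)
    #define SMUQ10_FRAC(x)    ((x) & 0x3ff)
    #define SMUQ10_ROUND(x)   ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200))

    int main(void)
    {
            /* 1234.75 MHz encoded as unsigned Q10: 1234 * 1024 + 768 (made up). */
            uint32_t raw = 1234u * 1024u + 768u;

            printf("integer part : %u\n", SMUQ10_TO_UINT(raw)); /* 1234 */
            printf("fraction     : %u/1024\n", SMUQ10_FRAC(raw)); /* 768 */
            printf("rounded      : %u\n", SMUQ10_ROUND(raw));     /* 1235 */
            return 0;
    }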
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 001a5cf09657..00cd615bbcdc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -989,6 +989,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
case METRICS_VERSION(1, 4):
structure_size = sizeof(struct gpu_metrics_v1_4);
break;
+ case METRICS_VERSION(1, 5):
+ structure_size = sizeof(struct gpu_metrics_v1_5);
+ break;
case METRICS_VERSION(2, 0):
structure_size = sizeof(struct gpu_metrics_v2_0);
break;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 2fa0445d8928..d1437c08645f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -187,7 +187,7 @@ struct nvkm_gsp {
void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
- void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+ int (*rm_ctrl_push)(struct nvkm_gsp_object *, void **argv, u32 repc);
void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
@@ -265,7 +265,7 @@ nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
}
-static inline void *
+static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
@@ -275,21 +275,24 @@ static inline void *
nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
{
void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+ int ret;
if (IS_ERR(argv))
return argv;
- return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+ ret = nvkm_gsp_rm_ctrl_push(object, &argv, repc);
+ if (ret)
+ return ERR_PTR(ret);
+ return argv;
}
static inline int
nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
{
- void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
-
- if (IS_ERR(repv))
- return PTR_ERR(repv);
+ int ret = nvkm_gsp_rm_ctrl_push(object, &argv, 0);
+ if (ret)
+ return ret;
return 0;
}
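The rm_ctrl_push() change above (pointer return becomes an int, the control buffer is passed by reference so the callee can swap or clear it) is easier to see outside the nvkm plumbing. Below is a self-contained mock of the new calling convention; the names only loosely mirror the nvkm helpers and none of this is the real API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct ctrl_params { int request; int reply; };

    /* Stand-in for ctrl_get(): allocate the argument buffer. */
    static struct ctrl_params *ctrl_get(void)
    {
            return calloc(1, sizeof(struct ctrl_params));
    }

    /* Stand-in for ctrl_push(): on success *argv points at the reply buffer,
     * on failure a negative errno comes back and the caller still cleans up. */
    static int ctrl_push(struct ctrl_params **argv)
    {
            if (!*argv)
                    return -EINVAL;
            (*argv)->reply = (*argv)->request * 2; /* pretend firmware answered */
            return 0;
    }

    static void ctrl_done(struct ctrl_params *argv)
    {
            free(argv);
    }

    int main(void)
    {
            struct ctrl_params *ctrl = ctrl_get();
            int ret;

            if (!ctrl)
                    return 1;
            ctrl->request = 21;

            /* New pattern: check the int, call ctrl_done() on both paths. */
            ret = ctrl_push(&ctrl);
            if (ret) {
                    ctrl_done(ctrl);
                    return 1;
            }
            printf("reply = %d\n", ctrl->reply);
            ctrl_done(ctrl);
            return 0;
    }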
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ca762ea55413..5057d976fa57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- if (!--fctx->notify_ref)
+ if (atomic_dec_and_test(&fctx->notify_ref))
drop = 1;
}
@@ -103,6 +103,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
+ cancel_work_sync(&fctx->allow_block_work);
nouveau_fence_context_kill(fctx, 0);
nvif_event_dtor(&fctx->event);
fctx->dead = 1;
@@ -167,6 +168,18 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
return ret;
}
+static void
+nouveau_fence_work_allow_block(struct work_struct *work)
+{
+ struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
+ allow_block_work);
+
+ if (atomic_read(&fctx->notify_ref) == 0)
+ nvif_event_block(&fctx->event);
+ else
+ nvif_event_allow(&fctx->event);
+}
+
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
@@ -178,6 +191,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
} args;
int ret;
+ INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
INIT_LIST_HEAD(&fctx->flip);
INIT_LIST_HEAD(&fctx->pending);
spin_lock_init(&fctx->lock);
@@ -521,15 +535,19 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
bool ret;
+ bool do_work = false;
- if (!fctx->notify_ref++)
- nvif_event_allow(&fctx->event);
+ if (atomic_inc_return(&fctx->notify_ref) == 0)
+ do_work = true;
ret = nouveau_fence_no_signaling(f);
if (ret)
set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
- else if (!--fctx->notify_ref)
- nvif_event_block(&fctx->event);
+ else if (atomic_dec_and_test(&fctx->notify_ref))
+ do_work = true;
+
+ if (do_work)
+ schedule_work(&fctx->allow_block_work);
return ret;
}
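The fence rework above turns notify_ref into an atomic_t and defers the nvif_event_allow()/nvif_event_block() calls behind a work item that acts on the final counter value, so the fence paths only touch the counter and schedule the work. A minimal sketch of that pattern with invented names (kernel-style, but not the nouveau code itself):

    #include <linux/atomic.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    struct notify_ctx {
            atomic_t refs;
            struct work_struct work;
    };

    static void notify_work(struct work_struct *work)
    {
            struct notify_ctx *ctx = container_of(work, struct notify_ctx, work);

            /* Work items run in process context, so sleeping calls are fine here. */
            if (atomic_read(&ctx->refs) == 0)
                    pr_info("would block events\n");
            else
                    pr_info("would allow events\n");
    }

    static struct notify_ctx ctx;

    /* Callers in atomic context only touch the counter and kick the worker. */
    static void notify_get(void)
    {
            if (atomic_inc_return(&ctx.refs) == 1)
                    schedule_work(&ctx.work);
    }

    static void notify_put(void)
    {
            if (atomic_dec_and_test(&ctx.refs))
                    schedule_work(&ctx.work);
    }

    static int __init pattern_init(void)
    {
            atomic_set(&ctx.refs, 0);
            INIT_WORK(&ctx.work, notify_work);
            notify_get();
            notify_put();
            return 0;
    }

    static void __exit pattern_exit(void)
    {
            cancel_work_sync(&ctx.work);
    }

    module_init(pattern_init);
    module_exit(pattern_exit);
    MODULE_LICENSE("GPL");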
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 64d33ae7f356..28f5cf013b89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -3,6 +3,7 @@
#define __NOUVEAU_FENCE_H__
#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
#include <nvif/event.h>
struct nouveau_drm;
@@ -45,7 +46,9 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_event event;
- int notify_ref, dead, killed;
+ struct work_struct allow_block_work;
+ atomic_t notify_ref;
+ int dead, killed;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 457ec5db794d..b24eb1e560bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -209,7 +209,7 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
nvkm_head_del(&head);
}
- if (disp->func->dtor)
+ if (disp->func && disp->func->dtor)
disp->func->dtor(disp);
return data;
@@ -243,8 +243,10 @@ nvkm_disp_new_(const struct nvkm_disp_func *func, struct nvkm_device *device,
spin_lock_init(&disp->client.lock);
ret = nvkm_engine_ctor(&nvkm_disp, device, type, inst, true, &disp->engine);
- if (ret)
+ if (ret) {
+ disp->func = NULL;
return ret;
+ }
if (func->super) {
disp->super.wq = create_singlethread_workqueue("nvkm-disp");
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 298035070b3a..6a0a4d3b8902 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -282,7 +282,7 @@ r535_sor_bl_get(struct nvkm_ior *sor)
{
struct nvkm_disp *disp = sor->disp;
NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
- int lvl;
+ int ret, lvl;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
@@ -292,9 +292,11 @@ r535_sor_bl_get(struct nvkm_ior *sor)
ctrl->displayId = BIT(sor->asy.outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
lvl = ctrl->brightness;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -649,9 +651,11 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return (void *)ctrl;
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ERR_PTR(ret);
+ }
list_for_each_entry(conn, &disp->conns, head) {
if (conn->index == ctrl->data[0].index) {
@@ -686,7 +690,7 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
struct nvkm_disp *disp = outp->disp;
struct nvkm_ior *ior;
NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
- int or;
+ int ret, or;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
@@ -699,9 +703,11 @@ r535_outp_acquire(struct nvkm_outp *outp, bool hda)
if (hda)
ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
@@ -727,6 +733,7 @@ static int
r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
{
NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
@@ -736,9 +743,11 @@ r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
ctrl->subDeviceInstance = 0;
ctrl->head = head;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*displayid = ctrl->displayId;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -772,9 +781,11 @@ r535_outp_inherit(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayId = displayid;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return NULL;
+ }
id = ctrl->index;
proto = ctrl->protocol;
@@ -825,6 +836,7 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
{
NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
if (IS_ERR(ctrl))
@@ -832,9 +844,11 @@ r535_outp_dfp_get_info(struct nvkm_outp *outp)
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
ctrl->displayId, ctrl->flags, ctrl->flags2);
@@ -858,9 +872,11 @@ r535_outp_detect(struct nvkm_outp *outp)
ctrl->subDeviceInstance = 0;
ctrl->displayMask = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
if (ctrl->displayMask & BIT(outp->index)) {
ret = r535_outp_dfp_get_info(outp);
@@ -895,6 +911,7 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
{
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
struct nvkm_disp *disp = outp->disp;
+ int ret;
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
@@ -904,9 +921,11 @@ r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
*pid = ctrl->displayIdAssigned;
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
@@ -938,38 +957,60 @@ r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8
{
struct nvkm_disp *disp = outp->disp;
NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
- int ret;
-
- ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ int ret, retries;
+ u32 cmd, data;
- ctrl->subDeviceInstance = 0;
- ctrl->displayId = BIT(outp->index);
- ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
- NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
- ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
- NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
- NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+ cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+ NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+ data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+ NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+ NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
if (mst)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
if (target == 0 &&
(outp->dp.dpcd[DPCD_RC02] & 0x20) &&
!(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
- ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+ cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ /* We should retry up to 3 times, but only if GSP asks politely */
+ for (retries = 0; retries < 3; ++retries) {
+ ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL,
+ sizeof(*ctrl));
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+
+ ctrl->subDeviceInstance = 0;
+ ctrl->displayId = BIT(outp->index);
+ ctrl->retryTimeMs = 0;
+ ctrl->cmd = cmd;
+ ctrl->data = data;
+
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret == -EAGAIN && ctrl->retryTimeMs) {
+ /*
+ * Device (likely an eDP panel) isn't ready yet, wait for the time specified
+ * by GSP before retrying again
+ */
+ nvkm_debug(&disp->engine.subdev,
+ "Waiting %dms for GSP LT panel delay before retrying\n",
+ ctrl->retryTimeMs);
+ msleep(ctrl->retryTimeMs);
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ } else {
+ /* GSP didn't say to retry, or we were successful */
+ if (ctrl->err)
+ ret = -EIO;
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ break;
+ }
+ }
- ret = ctrl->err ? -EIO : 0;
- nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
return ret;
}
@@ -1036,9 +1077,11 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
memcpy(ctrl->data, data, size);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
memcpy(data, ctrl->data, size);
*psize = ctrl->size;
@@ -1111,10 +1154,13 @@ r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(outp->index);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
+ ret = -E2BIG;
if (ctrl->bufferSize <= *psize) {
memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
*psize = ctrl->bufferSize;
@@ -1153,9 +1199,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (ctrl->type) {
case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
@@ -1229,9 +1277,11 @@ r535_outp_new(struct nvkm_disp *disp, u32 id)
ctrl->sorIndex = ~0;
- ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
- if (IS_ERR(ctrl))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
+ if (ret) {
+ nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+ return ret;
+ }
switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
@@ -1465,8 +1515,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
1ULL << 0x00000014);
- printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
-
if (nbci || nvhg) {
union acpi_object argv4 = {
.buffer.type = ACPI_TYPE_BUFFER,
@@ -1479,9 +1527,6 @@ r535_disp_oneinit(struct nvkm_disp *disp)
if (!obj) {
acpi_handle_info(handle, "failed to evaluate _DSM\n");
} else {
- printk(KERN_ERR "bl: obj type %d\n", obj->type);
- printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
-
for (int i = 0; i < obj->package.count; i++) {
union acpi_object *elt = &obj->package.elements[i];
u32 size;
@@ -1491,12 +1536,10 @@ r535_disp_oneinit(struct nvkm_disp *disp)
else
size = 4;
- printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
ctrl->backLightDataSize += size;
}
- printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
ctrl->status = 0;
ACPI_FREE(obj);
}
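The DP_CTRL hunk above replaces the single push with a bounded retry loop: when the push comes back -EAGAIN (NV_ERR_NOT_READY / NV_ERR_TIMEOUT_RETRY, see the errno mapping added in subdev/gsp/r535.c below) and GSP supplies retryTimeMs, the driver sleeps for that long and tries again, at most three times. The same shape in a tiny userspace sketch with invented names:

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    struct op { int retry_ms; };

    /* Pretend the device needs a couple of attempts before it is ready. */
    static int do_push(struct op *op)
    {
            static int calls;

            if (++calls < 3) {
                    op->retry_ms = 50;  /* delay hint from the "firmware" */
                    return -EAGAIN;
            }
            op->retry_ms = 0;
            return 0;
    }

    int main(void)
    {
            struct op op = { 0 };
            int ret = 0, attempt;

            for (attempt = 0; attempt < 3; attempt++) {
                    ret = do_push(&op);
                    if (ret == -EAGAIN && op.retry_ms) {
                            usleep(op.retry_ms * 1000); /* honour the hinted delay */
                            continue;
                    }
                    break;  /* success, or an error we do not retry */
            }
            printf("ret=%d\n", ret);
            return ret ? 1 : 0;
    }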
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
index d088e636edc3..b903785056b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
@@ -242,6 +242,7 @@ r535_chan_id_put(struct nvkm_chan *chan)
nvkm_memory_unref(&userd->mem);
nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
list_del(&userd->head);
+ kfree(userd);
}
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 44fb86841c05..9ee58e2a0eb2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -70,6 +70,20 @@ struct r535_gsp_msg {
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+static int
+r535_rpc_status_to_errno(uint32_t rpc_status)
+{
+ switch (rpc_status) {
+ case 0x55: /* NV_ERR_NOT_READY */
+ case 0x66: /* NV_ERR_TIMEOUT_RETRY */
+ return -EAGAIN;
+ case 0x51: /* NV_ERR_NO_MEMORY */
+ return -ENOMEM;
+ default:
+ return -EINVAL;
+ }
+}
+
static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
@@ -298,7 +312,8 @@ retry:
struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];
if (ntfy->fn == msg->function) {
- ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
+ if (ntfy->func)
+ ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
break;
}
}
@@ -583,14 +598,14 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
return rpc;
if (rpc->status) {
- nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
- ret = ERR_PTR(-EINVAL);
+ ret = ERR_PTR(r535_rpc_status_to_errno(rpc->status));
+ if (PTR_ERR(ret) != -EAGAIN)
+ nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
} else {
ret = repc ? rpc->params : NULL;
}
- if (IS_ERR_OR_NULL(ret))
- nvkm_gsp_rpc_done(gsp, rpc);
+ nvkm_gsp_rpc_done(gsp, rpc);
return ret;
}
@@ -623,29 +638,34 @@ r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);
+ if (!repv)
+ return;
nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
-static void *
-r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+static int
+r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **argv, u32 repc)
{
- rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
+ rpc_gsp_rm_control_v03_00 *rpc = container_of((*argv), typeof(*rpc), params);
struct nvkm_gsp *gsp = object->client->gsp;
- void *ret;
+ int ret = 0;
rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
- if (IS_ERR_OR_NULL(rpc))
- return rpc;
+ if (IS_ERR_OR_NULL(rpc)) {
+ *argv = NULL;
+ return PTR_ERR(rpc);
+ }
if (rpc->status) {
- nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
- object->client->object.handle, object->handle, rpc->cmd, rpc->status);
- ret = ERR_PTR(-EINVAL);
- } else {
- ret = repc ? rpc->params : NULL;
+ ret = r535_rpc_status_to_errno(rpc->status);
+ if (ret != -EAGAIN)
+ nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
+ object->client->object.handle, object->handle, rpc->cmd, rpc->status);
}
- if (IS_ERR_OR_NULL(ret))
+ if (repc)
+ *argv = rpc->params;
+ else
nvkm_gsp_rpc_done(gsp, rpc);
return ret;
@@ -843,9 +863,11 @@ r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
if (IS_ERR(ctrl))
return PTR_ERR(ctrl);
- ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
- if (WARN_ON(IS_ERR(ctrl)))
- return PTR_ERR(ctrl);
+ ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
+ if (WARN_ON(ret)) {
+ nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+ return ret;
+ }
for (unsigned i = 0; i < ctrl->tableLen; i++) {
enum nvkm_subdev_type type;
@@ -1099,16 +1121,12 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
if (!obj)
return;
- printk(KERN_ERR "nvop: obj type %d\n", obj->type);
- printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
caps->status = 0;
caps->optimusCaps = *(u32 *)obj->buffer.pointer;
- printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
ACPI_FREE(obj);
@@ -1135,9 +1153,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
if (!obj)
return;
- printk(KERN_ERR "jt: obj type %d\n", obj->type);
- printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
-
if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
WARN_ON(obj->buffer.length != 4))
return;
@@ -1146,7 +1161,6 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
jt->jtCaps = *(u32 *)obj->buffer.pointer;
jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
jt->bSBIOSCaps = 0;
- printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
ACPI_FREE(obj);
@@ -1157,6 +1171,8 @@ static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
MUX_METHOD_DATA_ELEMENT *part)
{
+ union acpi_object mux_arg = { ACPI_TYPE_INTEGER };
+ struct acpi_object_list input = { 1, &mux_arg };
acpi_handle iter = NULL, handle_mux = NULL;
acpi_status status;
unsigned long long value;
@@ -1179,14 +1195,18 @@ r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
if (!handle_mux)
return;
- status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+ /* I -think- 0 means "acquire" according to nvidia's driver source */
+ input.pointer->integer.type = ACPI_TYPE_INTEGER;
+ input.pointer->integer.value = 0;
+
+ status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value);
if (ACPI_SUCCESS(status)) {
mode->acpiId = id;
mode->mode = value;
mode->status = 0;
}
- status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+ status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value);
if (ACPI_SUCCESS(status)) {
part->acpiId = id;
part->mode = value;
@@ -1232,8 +1252,8 @@ r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
}
- printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
dod->status = 0;
+ kfree(output.pointer);
}
#endif
@@ -2186,7 +2206,9 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
r535_gsp_msg_mmu_fault_queued, gsp);
r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
-
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, NULL, NULL);
+ r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_SEND_USER_SHARED_DATA, NULL, NULL);
ret = r535_gsp_rm_boot_ctor(gsp);
if (ret)
return ret;