diff options
Diffstat (limited to 'drivers/gpu/nova-core/gsp')
| -rw-r--r-- | drivers/gpu/nova-core/gsp/boot.rs | 153 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/cmdq.rs | 679 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/commands.rs | 227 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/fw.rs | 923 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/fw/commands.rs | 128 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/fw/r570_144.rs | 6 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs | 950 | ||||
| -rw-r--r-- | drivers/gpu/nova-core/gsp/sequencer.rs | 407 |
8 files changed, 3451 insertions, 22 deletions
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs index 2800f3aee37d..54937606b5b0 100644 --- a/drivers/gpu/nova-core/gsp/boot.rs +++ b/drivers/gpu/nova-core/gsp/boot.rs @@ -1,21 +1,47 @@ // SPDX-License-Identifier: GPL-2.0 -use kernel::device; -use kernel::pci; -use kernel::prelude::*; - -use crate::driver::Bar0; -use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon}; -use crate::fb::FbLayout; -use crate::firmware::{ - booter::{BooterFirmware, BooterKind}, - fwsec::{FwsecCommand, FwsecFirmware}, - gsp::GspFirmware, - FIRMWARE_VERSION, +use kernel::{ + device, + dma::CoherentAllocation, + dma_write, + io::poll::read_poll_timeout, + pci, + prelude::*, + time::Delta, // +}; + +use crate::{ + driver::Bar0, + falcon::{ + gsp::Gsp, + sec2::Sec2, + Falcon, // + }, + fb::FbLayout, + firmware::{ + booter::{ + BooterFirmware, + BooterKind, // + }, + fwsec::{ + FwsecCommand, + FwsecFirmware, // + }, + gsp::GspFirmware, + FIRMWARE_VERSION, // + }, + gpu::Chipset, + gsp::{ + commands, + sequencer::{ + GspSequencer, + GspSequencerParams, // + }, + GspFwWprMeta, // + }, + regs, + vbios::Vbios, }; -use crate::gpu::Chipset; -use crate::regs; -use crate::vbios::Vbios; impl super::Gsp { /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly @@ -102,7 +128,7 @@ impl super::Gsp { /// /// Upon return, the GSP is up and running, and its runtime object given as return value. 
pub(crate) fn boot( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, pdev: &pci::Device<device::Bound>, bar: &Bar0, chipset: Chipset, @@ -113,17 +139,17 @@ impl super::Gsp { let bios = Vbios::new(dev, bar)?; - let _gsp_fw = KBox::pin_init( + let gsp_fw = KBox::pin_init( GspFirmware::new(dev, chipset, FIRMWARE_VERSION)?, GFP_KERNEL, )?; - let fb_layout = FbLayout::new(chipset, bar)?; + let fb_layout = FbLayout::new(chipset, bar, &gsp_fw)?; dev_dbg!(dev, "{:#x?}\n", fb_layout); Self::run_fwsec_frts(dev, gsp_falcon, bar, &bios, &fb_layout)?; - let _booter_loader = BooterFirmware::new( + let booter_loader = BooterFirmware::new( dev, BooterKind::Loader, chipset, @@ -132,6 +158,95 @@ impl super::Gsp { bar, )?; + let wpr_meta = + CoherentAllocation::<GspFwWprMeta>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; + dma_write!(wpr_meta[0] = GspFwWprMeta::new(&gsp_fw, &fb_layout))?; + + self.cmdq + .send_command(bar, commands::SetSystemInfo::new(pdev))?; + self.cmdq.send_command(bar, commands::SetRegistry::new())?; + + gsp_falcon.reset(bar)?; + let libos_handle = self.libos.dma_handle(); + let (mbox0, mbox1) = gsp_falcon.boot( + bar, + Some(libos_handle as u32), + Some((libos_handle >> 32) as u32), + )?; + dev_dbg!( + pdev.as_ref(), + "GSP MBOX0: {:#x}, MBOX1: {:#x}\n", + mbox0, + mbox1 + ); + + dev_dbg!( + pdev.as_ref(), + "Using SEC2 to load and run the booter_load firmware...\n" + ); + + sec2_falcon.reset(bar)?; + sec2_falcon.dma_load(bar, &booter_loader)?; + let wpr_handle = wpr_meta.dma_handle(); + let (mbox0, mbox1) = sec2_falcon.boot( + bar, + Some(wpr_handle as u32), + Some((wpr_handle >> 32) as u32), + )?; + dev_dbg!( + pdev.as_ref(), + "SEC2 MBOX0: {:#x}, MBOX1{:#x}\n", + mbox0, + mbox1 + ); + + if mbox0 != 0 { + dev_err!( + pdev.as_ref(), + "Booter-load failed with error {:#x}\n", + mbox0 + ); + return Err(ENODEV); + } + + gsp_falcon.write_os_version(bar, gsp_fw.bootloader.app_version); + + // Poll for RISC-V to become active before running sequencer + 
read_poll_timeout( + || Ok(gsp_falcon.is_riscv_active(bar)), + |val: &bool| *val, + Delta::from_millis(10), + Delta::from_secs(5), + )?; + + dev_dbg!( + pdev.as_ref(), + "RISC-V active? {}\n", + gsp_falcon.is_riscv_active(bar), + ); + + // Create and run the GSP sequencer. + let seq_params = GspSequencerParams { + bootloader_app_version: gsp_fw.bootloader.app_version, + libos_dma_handle: libos_handle, + gsp_falcon, + sec2_falcon, + dev: pdev.as_ref().into(), + bar, + }; + GspSequencer::run(&mut self.cmdq, seq_params)?; + + // Wait until GSP is fully initialized. + commands::wait_gsp_init_done(&mut self.cmdq)?; + + // Obtain and display basic GPU information. + let info = commands::get_gsp_info(&mut self.cmdq, bar)?; + dev_info!( + pdev.as_ref(), + "GPU name: {}\n", + info.gpu_name().unwrap_or("invalid GPU name") + ); + Ok(()) } } diff --git a/drivers/gpu/nova-core/gsp/cmdq.rs b/drivers/gpu/nova-core/gsp/cmdq.rs new file mode 100644 index 000000000000..6f946d14868a --- /dev/null +++ b/drivers/gpu/nova-core/gsp/cmdq.rs @@ -0,0 +1,679 @@ +// SPDX-License-Identifier: GPL-2.0 + +use core::{ + cmp, + mem, + sync::atomic::{ + fence, + Ordering, // + }, // +}; + +use kernel::{ + device, + dma::{ + CoherentAllocation, + DmaAddress, // + }, + dma_write, + io::poll::read_poll_timeout, + prelude::*, + sync::aref::ARef, + time::Delta, + transmute::{ + AsBytes, + FromBytes, // + }, +}; + +use crate::{ + driver::Bar0, + gsp::{ + fw::{ + GspMsgElement, + MsgFunction, + MsgqRxHeader, + MsgqTxHeader, // + }, + PteArray, + GSP_PAGE_SHIFT, + GSP_PAGE_SIZE, // + }, + num, + regs, + sbuffer::SBufferIter, // +}; + +/// Trait implemented by types representing a command to send to the GSP. +/// +/// The main purpose of this trait is to provide [`Cmdq::send_command`] with the information it +/// needs to send a given command. +/// +/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly +/// into the space reserved for it in the command queue buffer. 
+/// +/// Some commands may be followed by a variable-length payload. For these, the +/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be +/// defined as well. +pub(crate) trait CommandToGsp { + /// Function identifying this command to the GSP. + const FUNCTION: MsgFunction; + + /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer. + type Command: FromBytes + AsBytes; + + /// Error type returned by [`CommandToGsp::init`]. + type InitError; + + /// In-place command initializer responsible for filling the command in the command queue + /// buffer. + fn init(&self) -> impl Init<Self::Command, Self::InitError>; + + /// Size of the variable-length payload following the command structure generated by + /// [`CommandToGsp::init`]. + /// + /// Most commands don't have a variable-length payload, so this is zero by default. + fn variable_payload_len(&self) -> usize { + 0 + } + + /// Method initializing the variable-length payload. + /// + /// The command buffer is circular, which means that we may need to jump back to its beginning + /// while in the middle of a command. For this reason, the variable-length payload is + /// initialized using a [`SBufferIter`]. + /// + /// This method will receive a buffer of the length returned by + /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving + /// unwritten space will lead to an error. + /// + /// Most commands don't have a variable-length payload, so this does nothing by default. + fn init_variable_payload( + &self, + _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>, + ) -> Result { + Ok(()) + } +} + +/// Trait representing messages received from the GSP. +/// +/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message. +pub(crate) trait MessageFromGsp: Sized { + /// Function identifying this message from the GSP. 
+ const FUNCTION: MsgFunction; + + /// Error type returned by [`MessageFromGsp::read`]. + type InitError; + + /// Type containing the raw message to be read from the message queue. + type Message: FromBytes; + + /// Method reading the message from the message queue and returning it. + /// + /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns + /// it. + fn read( + msg: &Self::Message, + sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>, + ) -> Result<Self, Self::InitError>; +} + +/// Number of GSP pages making the [`Msgq`]. +pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f; + +/// Circular buffer of a [`Msgq`]. +/// +/// This area of memory is to be shared between the driver and the GSP to exchange commands or +/// messages. +#[repr(C, align(0x1000))] +#[derive(Debug)] +struct MsgqData { + data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)], +} + +// Annoyingly we are forced to use a literal to specify the alignment of +// `MsgqData`, so check that it corresponds to the actual GSP page size here. +static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE); + +/// Unidirectional message queue. +/// +/// Contains the data for a message queue, that either the driver or GSP writes to. +/// +/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the +/// read pointer of `rx` actually refers to the `Msgq` owned by the other side. +/// This design ensures that only the driver or GSP ever writes to a given instance of this struct. +#[repr(C)] +// There is no struct defined for this in the open-gpu-kernel-source headers. +// Instead it is defined by code in `GspMsgQueuesInit()`. +struct Msgq { + /// Header for sending messages, including the write pointer. + tx: MsgqTxHeader, + /// Header for receiving messages, including the read pointer. + rx: MsgqRxHeader, + /// The message queue proper. 
+ msgq: MsgqData, +} + +/// Structure shared between the driver and the GSP and containing the command and message queues. +#[repr(C)] +struct GspMem { + /// Self-mapping page table entries. + ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>, + /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the + /// write and read pointers that the CPU updates. + /// + /// This member is read-only for the GSP. + cpuq: Msgq, + /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the + /// write and read pointers that the GSP updates. + /// + /// This member is read-only for the driver. + gspq: Msgq, +} + +// SAFETY: These structs don't meet the no-padding requirements of AsBytes but +// that is not a problem because they are not used outside the kernel. +unsafe impl AsBytes for GspMem {} + +// SAFETY: These structs don't meet the no-padding requirements of FromBytes but +// that is not a problem because they are not used outside the kernel. +unsafe impl FromBytes for GspMem {} + +/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`]. +/// +/// This provides the low-level functionality to communicate with the GSP, including allocation of +/// queue space to write messages to and management of read/write pointers. +/// +/// This is shared with the GSP, with clear ownership rules regarding the command queues: +/// +/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write +/// pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`]. +/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read +/// pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`]. +struct DmaGspMem(CoherentAllocation<GspMem>); + +impl DmaGspMem { + /// Allocate a new instance and map it for `dev`. 
+ fn new(dev: &device::Device<device::Bound>) -> Result<Self> { + const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>(); + const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>(); + + let gsp_mem = + CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?; + dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?; + dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?; + dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?; + + Ok(Self(gsp_mem)) + } + + /// Returns the region of the CPU message queue that the driver is currently allowed to write + /// to. + /// + /// As the message queue is a circular buffer, the region may be discontiguous in memory. In + /// that case the second slice will have a non-zero length. + fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) { + let tx = self.cpu_write_ptr() as usize; + let rx = self.gsp_read_ptr() as usize; + + // SAFETY: + // - The `CoherentAllocation` contains exactly one object. + // - We will only access the driver-owned part of the shared memory. + // - Per the safety statement of the function, no concurrent access will be performed. + let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0]; + // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `<= MSGQ_NUM_PAGES`. + let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx); + + if rx <= tx { + // The area from `tx` up to the end of the ring, and from the beginning of the ring up + // to `rx`, minus one unit, belongs to the driver. + if rx == 0 { + let last = after_tx.len() - 1; + (&mut after_tx[..last], &mut before_tx[0..0]) + } else { + (after_tx, &mut before_tx[..rx]) + } + } else { + // The area from `tx` to `rx`, minus one unit, belongs to the driver. 
+ // + // PANIC: per the invariants of `cpu_write_ptr` and `gsp_read_ptr`, `rx` and `tx` are + // `<= MSGQ_NUM_PAGES`, and the test above ensured that `rx > tx`. + (after_tx.split_at_mut(rx - tx).0, &mut before_tx[0..0]) + } + } + + /// Returns the region of the GSP message queue that the driver is currently allowed to read + /// from. + /// + /// As the message queue is a circular buffer, the region may be discontiguous in memory. In + /// that case the second slice will have a non-zero length. + fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) { + let tx = self.gsp_write_ptr() as usize; + let rx = self.cpu_read_ptr() as usize; + + // SAFETY: + // - The `CoherentAllocation` contains exactly one object. + // - We will only access the driver-owned part of the shared memory. + // - Per the safety statement of the function, no concurrent access will be performed. + let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0]; + // PANIC: per the invariant of `cpu_read_ptr`, `xx` is `<= MSGQ_NUM_PAGES`. + let (before_rx, after_rx) = gsp_mem.gspq.msgq.data.split_at(rx); + + match tx.cmp(&rx) { + cmp::Ordering::Equal => (&after_rx[0..0], &after_rx[0..0]), + cmp::Ordering::Greater => (&after_rx[..tx], &before_rx[0..0]), + cmp::Ordering::Less => (after_rx, &before_rx[..tx]), + } + } + + /// Allocates a region on the command queue that is large enough to send a command of `size` + /// bytes. + /// + /// This returns a [`GspCommand`] ready to be written to by the caller. + /// + /// # Errors + /// + /// - `EAGAIN` if the driver area is too small to hold the requested command. + /// - `EIO` if the command header is not properly aligned. + fn allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>> { + // Get the current writable area as an array of bytes. 
+ let (slice_1, slice_2) = { + let (slice_1, slice_2) = self.driver_write_area(); + + #[allow(clippy::incompatible_msrv)] + (slice_1.as_flattened_mut(), slice_2.as_flattened_mut()) + }; + + // If the GSP is still processing previous messages the shared region + // may be full in which case we will have to retry once the GSP has + // processed the existing commands. + if size_of::<GspMsgElement>() + size > slice_1.len() + slice_2.len() { + return Err(EAGAIN); + } + + // Extract area for the `GspMsgElement`. + let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?; + + // Create the contents area. + let (slice_1, slice_2) = if slice_1.len() > size { + // Contents fits entirely in `slice_1`. + (&mut slice_1[..size], &mut slice_2[0..0]) + } else { + // Need all of `slice_1` and some of `slice_2`. + let slice_2_len = size - slice_1.len(); + (slice_1, &mut slice_2[..slice_2_len]) + }; + + Ok(GspCommand { + header, + contents: (slice_1, slice_2), + }) + } + + // Returns the index of the memory page the GSP will write the next message to. + // + // # Invariants + // + // - The returned value is between `0` and `MSGQ_NUM_PAGES`. + fn gsp_write_ptr(&self) -> u32 { + let gsp_mem = self.0.start_ptr(); + + // SAFETY: + // - The 'CoherentAllocation' contains at least one object. + // - By the invariants of `CoherentAllocation` the pointer is valid. + (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES) + } + + // Returns the index of the memory page the GSP will read the next command from. + // + // # Invariants + // + // - The returned value is between `0` and `MSGQ_NUM_PAGES`. + fn gsp_read_ptr(&self) -> u32 { + let gsp_mem = self.0.start_ptr(); + + // SAFETY: + // - The 'CoherentAllocation' contains at least one object. + // - By the invariants of `CoherentAllocation` the pointer is valid. + (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES) + } + + // Returns the index of the memory page the CPU can read the next message from. 
+ // + // # Invariants + // + // - The returned value is between `0` and `MSGQ_NUM_PAGES`. + fn cpu_read_ptr(&self) -> u32 { + let gsp_mem = self.0.start_ptr(); + + // SAFETY: + // - The ['CoherentAllocation'] contains at least one object. + // - By the invariants of CoherentAllocation the pointer is valid. + (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES) + } + + // Informs the GSP that it can send `elem_count` new pages into the message queue. + fn advance_cpu_read_ptr(&mut self, elem_count: u32) { + let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES; + + // Ensure read pointer is properly ordered. + fence(Ordering::SeqCst); + + let gsp_mem = self.0.start_ptr_mut(); + + // SAFETY: + // - The 'CoherentAllocation' contains at least one object. + // - By the invariants of `CoherentAllocation` the pointer is valid. + unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) }; + } + + // Returns the index of the memory page the CPU can write the next command to. + // + // # Invariants + // + // - The returned value is between `0` and `MSGQ_NUM_PAGES`. + fn cpu_write_ptr(&self) -> u32 { + let gsp_mem = self.0.start_ptr(); + + // SAFETY: + // - The 'CoherentAllocation' contains at least one object. + // - By the invariants of `CoherentAllocation` the pointer is valid. + (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES) + } + + // Informs the GSP that it can process `elem_count` new pages from the command queue. + fn advance_cpu_write_ptr(&mut self, elem_count: u32) { + let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES; + let gsp_mem = self.0.start_ptr_mut(); + + // SAFETY: + // - The 'CoherentAllocation' contains at least one object. + // - By the invariants of `CoherentAllocation` the pointer is valid. + unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) }; + + // Ensure all command data is visible before triggering the GSP read. 
+ fence(Ordering::SeqCst); + } +} + +/// A command ready to be sent on the command queue. +/// +/// This is the type returned by [`DmaGspMem::allocate_command`]. +struct GspCommand<'a> { + // Writable reference to the header of the command. + header: &'a mut GspMsgElement, + // Writable slices to the contents of the command. The second slice is zero unless the command + // loops over the command queue. + contents: (&'a mut [u8], &'a mut [u8]), +} + +/// A message ready to be processed from the message queue. +/// +/// This is the type returned by [`Cmdq::wait_for_msg`]. +struct GspMessage<'a> { + // Reference to the header of the message. + header: &'a GspMsgElement, + // Slices to the contents of the message. The second slice is zero unless the message loops + // over the message queue. + contents: (&'a [u8], &'a [u8]), +} + +/// GSP command queue. +/// +/// Provides the ability to send commands and receive messages from the GSP using a shared memory +/// area. +pub(crate) struct Cmdq { + /// Device this command queue belongs to. + dev: ARef<device::Device>, + /// Current command sequence number. + seq: u32, + /// Memory area shared with the GSP for communicating commands and messages. + gsp_mem: DmaGspMem, +} + +impl Cmdq { + /// Offset of the data after the PTEs. + const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq); + + /// Offset of command queue ring buffer. + pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq) + + core::mem::offset_of!(Msgq, msgq) + - Self::POST_PTE_OFFSET; + + /// Offset of message queue ring buffer. + pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq) + + core::mem::offset_of!(Msgq, msgq) + - Self::POST_PTE_OFFSET; + + /// Number of page table entries for the GSP shared region. + pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT; + + /// Creates a new command queue for `dev`. 
    pub(crate) fn new(dev: &device::Device<device::Bound>) -> Result<Cmdq> {
        let gsp_mem = DmaGspMem::new(dev)?;

        Ok(Cmdq {
            dev: dev.into(),
            seq: 0,
            gsp_mem,
        })
    }

    /// Computes the checksum for the message pointed to by `it`.
    ///
    /// A message is made of several parts, so `it` is an iterator over the bytes of these parts,
    /// chained together.
    fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
        // XOR each byte into a 64-bit accumulator, rotated into position by its offset within an
        // 8-byte group, then fold the two 32-bit halves together.
        let sum64 = it
            .enumerate()
            .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
            .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));

        ((sum64 >> 32) as u32) ^ (sum64 as u32)
    }

    /// Notifies the GSP that we have updated the command queue pointers.
    fn notify_gsp(bar: &Bar0) {
        regs::NV_PGSP_QUEUE_HEAD::default()
            .set_address(0)
            .write(bar);
    }

    /// Sends `command` to the GSP.
    ///
    /// # Errors
    ///
    /// - `EAGAIN` if there was not enough space in the command queue to send the command.
    /// - `EIO` if the variable payload requested by the command has not been entirely
    ///   written to by its [`CommandToGsp::init_variable_payload`] method.
    ///
    /// Error codes returned by the command initializers are propagated as-is.
    pub(crate) fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
    where
        M: CommandToGsp,
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let command_size = size_of::<M::Command>() + command.variable_payload_len();
        let dst = self.gsp_mem.allocate_command(command_size)?;

        // Extract area for the command itself.
        let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;

        // Fill the header and command in-place.
        let msg_element = GspMsgElement::init(self.seq, command_size, M::FUNCTION);
        // SAFETY: `dst.header` and `cmd` are valid references, and not touched if the initializer
        // fails.
        unsafe {
            msg_element.__init(core::ptr::from_mut(dst.header))?;
            command.init().__init(core::ptr::from_mut(cmd))?;
        }

        // Fill the variable-length payload.
        if command_size > size_of::<M::Command>() {
            let mut sbuffer =
                SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
            command.init_variable_payload(&mut sbuffer)?;

            // The payload must be written in full, otherwise uninitialized queue bytes would be
            // sent to the GSP.
            if !sbuffer.is_empty() {
                return Err(EIO);
            }
        }

        // Compute checksum now that the whole message is ready.
        dst.header
            .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
                dst.header.as_bytes(),
                dst.contents.0,
                dst.contents.1,
            ])));

        dev_dbg!(
            &self.dev,
            "GSP RPC: send: seq# {}, function={}, length=0x{:x}\n",
            self.seq,
            M::FUNCTION,
            dst.header.length(),
        );

        // All set - update the write pointer and inform the GSP of the new command.
        let elem_count = dst.header.element_count();
        self.seq += 1;
        self.gsp_mem.advance_cpu_write_ptr(elem_count);
        Cmdq::notify_gsp(bar);

        Ok(())
    }

    /// Wait for a message to become available on the message queue.
    ///
    /// This works purely at the transport layer and does not interpret or validate the message
    /// beyond the advertised length in its [`GspMsgElement`].
    ///
    /// This method returns:
    ///
    /// - A reference to the [`GspMsgElement`] of the message,
    /// - Two byte slices with the contents of the message. The second slice is empty unless the
    ///   message loops across the message queue.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    ///
    /// Error codes returned by the message constructor are propagated as-is.
    fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
        // Wait for a message to arrive from the GSP.
        let (slice_1, slice_2) = read_poll_timeout(
            || Ok(self.gsp_mem.driver_read_area()),
            |driver_area| !driver_area.0.is_empty(),
            Delta::from_millis(1),
            timeout,
        )
        .map(|(slice_1, slice_2)| {
            #[allow(clippy::incompatible_msrv)]
            (slice_1.as_flattened(), slice_2.as_flattened())
        })?;

        // Extract the `GspMsgElement`.
        let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;

        dev_dbg!(
            self.dev,
            "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
            header.sequence(),
            header.function(),
            header.length(),
        );

        // Check that the driver read area is large enough for the message.
        if slice_1.len() + slice_2.len() < header.length() {
            return Err(EIO);
        }

        // Cut the message slices down to the actual length of the message.
        let (slice_1, slice_2) = if slice_1.len() > header.length() {
            // PANIC: we checked above that `slice_1` is at least as long as `msg_header.length()`.
            (slice_1.split_at(header.length()).0, &slice_2[0..0])
        } else {
            (
                slice_1,
                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
                // large as `msg_header.length()`.
                slice_2.split_at(header.length() - slice_1.len()).0,
            )
        };

        // Validate checksum. The checksum over the whole message (header included) must fold to
        // zero.
        if Cmdq::calculate_checksum(SBufferIter::new_reader([
            header.as_bytes(),
            slice_1,
            slice_2,
        ])) != 0
        {
            dev_err!(
                self.dev,
                "GSP RPC: receive: Call {} - bad checksum",
                header.sequence()
            );
            return Err(EIO);
        }

        Ok(GspMessage {
            header,
            contents: (slice_1, slice_2),
        })
    }

    /// Receive a message from the GSP.
    ///
    /// The raw message is read from the message queue and handed to the
    /// [`MessageFromGsp::read`] implementation of `M`, together with a [`SBufferIter`] pointing
    /// to its variable-length payload, if any.
    ///
    /// The expected message is specified using the `M` generic parameter. If the pending message
    /// is different, `ERANGE` is returned and the unexpected message is dropped.
    ///
    /// This design is by no means final, but it is simple and will let us go through GSP
    /// initialization.
    ///
    /// # Errors
    ///
    /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
    /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
    ///   message queue.
    /// - `EINVAL` if the function of the message was unrecognized.
    /// - `ERANGE` if the function of the pending message did not match `M::FUNCTION`.
    pub(crate) fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
    where
        // This allows all error types, including `Infallible`, to be used for `M::InitError`.
        Error: From<M::InitError>,
    {
        let message = self.wait_for_msg(timeout)?;
        let function = message.header.function().map_err(|_| EINVAL)?;

        // Extract the message. Store the result as we want to advance the read pointer even in
        // case of failure.
        let result = if function == M::FUNCTION {
            let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
            let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);

            M::read(cmd, &mut sbuffer).map_err(|e| e.into())
        } else {
            Err(ERANGE)
        };

        // Advance the read pointer past this message.
        self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
            message.header.length().div_ceil(GSP_PAGE_SIZE),
        )?);

        result
    }

    /// Returns the DMA handle of the command queue's shared memory region.
+ pub(crate) fn dma_handle(&self) -> DmaAddress { + self.gsp_mem.0.dma_handle() + } +} diff --git a/drivers/gpu/nova-core/gsp/commands.rs b/drivers/gpu/nova-core/gsp/commands.rs new file mode 100644 index 000000000000..0425c65b5d6f --- /dev/null +++ b/drivers/gpu/nova-core/gsp/commands.rs @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 + +use core::{ + array, + convert::Infallible, // +}; + +use kernel::{ + device, + pci, + prelude::*, + time::Delta, + transmute::{ + AsBytes, + FromBytes, // + }, // +}; + +use crate::{ + driver::Bar0, + gsp::{ + cmdq::{ + Cmdq, + CommandToGsp, + MessageFromGsp, // + }, + fw::{ + commands::*, + MsgFunction, // + }, + }, + sbuffer::SBufferIter, + util, +}; + +/// The `GspSetSystemInfo` command. +pub(crate) struct SetSystemInfo<'a> { + pdev: &'a pci::Device<device::Bound>, +} + +impl<'a> SetSystemInfo<'a> { + /// Creates a new `GspSetSystemInfo` command using the parameters of `pdev`. + pub(crate) fn new(pdev: &'a pci::Device<device::Bound>) -> Self { + Self { pdev } + } +} + +impl<'a> CommandToGsp for SetSystemInfo<'a> { + const FUNCTION: MsgFunction = MsgFunction::GspSetSystemInfo; + type Command = GspSetSystemInfo; + type InitError = Error; + + fn init(&self) -> impl Init<Self::Command, Self::InitError> { + GspSetSystemInfo::init(self.pdev) + } +} + +struct RegistryEntry { + key: &'static str, + value: u32, +} + +/// The `SetRegistry` command. +pub(crate) struct SetRegistry { + entries: [RegistryEntry; Self::NUM_ENTRIES], +} + +impl SetRegistry { + // For now we hard-code the registry entries. Future work will allow others to + // be added as module parameters. + const NUM_ENTRIES: usize = 3; + + /// Creates a new `SetRegistry` command, using a set of hardcoded entries. 
+ pub(crate) fn new() -> Self { + Self { + entries: [ + // RMSecBusResetEnable - enables PCI secondary bus reset + RegistryEntry { + key: "RMSecBusResetEnable", + value: 1, + }, + // RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration registers on + // any PCI reset. + RegistryEntry { + key: "RMForcePcieConfigSave", + value: 1, + }, + // RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID is not found + // in the internal product name database. + RegistryEntry { + key: "RMDevidCheckIgnore", + value: 1, + }, + ], + } + } +} + +impl CommandToGsp for SetRegistry { + const FUNCTION: MsgFunction = MsgFunction::SetRegistry; + type Command = PackedRegistryTable; + type InitError = Infallible; + + fn init(&self) -> impl Init<Self::Command, Self::InitError> { + PackedRegistryTable::init(Self::NUM_ENTRIES as u32, self.variable_payload_len() as u32) + } + + fn variable_payload_len(&self) -> usize { + let mut key_size = 0; + for i in 0..Self::NUM_ENTRIES { + key_size += self.entries[i].key.len() + 1; // +1 for NULL terminator + } + Self::NUM_ENTRIES * size_of::<PackedRegistryEntry>() + key_size + } + + fn init_variable_payload( + &self, + dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>, + ) -> Result { + let string_data_start_offset = + size_of::<PackedRegistryTable>() + Self::NUM_ENTRIES * size_of::<PackedRegistryEntry>(); + + // Array for string data. + let mut string_data = KVec::new(); + + for entry in self.entries.iter().take(Self::NUM_ENTRIES) { + dst.write_all( + PackedRegistryEntry::new( + (string_data_start_offset + string_data.len()) as u32, + entry.value, + ) + .as_bytes(), + )?; + + let key_bytes = entry.key.as_bytes(); + string_data.extend_from_slice(key_bytes, GFP_KERNEL)?; + string_data.push(0, GFP_KERNEL)?; + } + + dst.write_all(string_data.as_slice()) + } +} + +/// Message type for GSP initialization done notification. 
+struct GspInitDone {}
+
+// SAFETY: `GspInitDone` is a zero-sized type with no bytes, therefore it
+// trivially has no uninitialized bytes.
+unsafe impl FromBytes for GspInitDone {}
+
+impl MessageFromGsp for GspInitDone {
+    const FUNCTION: MsgFunction = MsgFunction::GspInitDone;
+    type InitError = Infallible;
+    type Message = GspInitDone;
+
+    fn read(
+        _msg: &Self::Message,
+        _sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
+    ) -> Result<Self, Self::InitError> {
+        Ok(GspInitDone {})
+    }
+}
+
+/// Waits for GSP initialization to complete.
+pub(crate) fn wait_gsp_init_done(cmdq: &mut Cmdq) -> Result {
+    loop {
+        match cmdq.receive_msg::<GspInitDone>(Delta::from_secs(10)) {
+            Ok(_) => break Ok(()),
+            Err(ERANGE) => continue,
+            Err(e) => break Err(e),
+        }
+    }
+}
+
+/// The `GetGspStaticInfo` command.
+struct GetGspStaticInfo;
+
+impl CommandToGsp for GetGspStaticInfo {
+    const FUNCTION: MsgFunction = MsgFunction::GetGspStaticInfo;
+    type Command = GspStaticConfigInfo;
+    type InitError = Infallible;
+
+    fn init(&self) -> impl Init<Self::Command, Self::InitError> {
+        GspStaticConfigInfo::init_zeroed()
+    }
+}
+
+/// The reply from the GSP to the [`GetGspStaticInfo`] command.
+pub(crate) struct GetGspStaticInfoReply {
+    gpu_name: [u8; 64],
+}
+
+impl MessageFromGsp for GetGspStaticInfoReply {
+    const FUNCTION: MsgFunction = MsgFunction::GetGspStaticInfo;
+    type Message = GspStaticConfigInfo;
+    type InitError = Infallible;
+
+    fn read(
+        msg: &Self::Message,
+        _sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
+    ) -> Result<Self, Self::InitError> {
+        Ok(GetGspStaticInfoReply {
+            gpu_name: msg.gpu_name_str(),
+        })
+    }
+}
+
+impl GetGspStaticInfoReply {
+    /// Returns the name of the GPU as a string, or `None` if the string given by the GSP was
+    /// invalid.
+    pub(crate) fn gpu_name(&self) -> Option<&str> {
+        util::str_from_null_terminated(&self.gpu_name)
+    }
+}
+
+/// Sends the [`GetGspStaticInfo`] command and awaits its reply.
+pub(crate) fn get_gsp_info(cmdq: &mut Cmdq, bar: &Bar0) -> Result<GetGspStaticInfoReply> { + cmdq.send_command(bar, GetGspStaticInfo)?; + + loop { + match cmdq.receive_msg::<GetGspStaticInfoReply>(Delta::from_secs(5)) { + Ok(info) => return Ok(info), + Err(ERANGE) => continue, + Err(e) => return Err(e), + } + } +} diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs index 34226dd00982..abffd6beec65 100644 --- a/drivers/gpu/nova-core/gsp/fw.rs +++ b/drivers/gpu/nova-core/gsp/fw.rs @@ -1,7 +1,928 @@ // SPDX-License-Identifier: GPL-2.0 +pub(crate) mod commands; mod r570_144; // Alias to avoid repeating the version number with every use. -#[expect(unused)] use r570_144 as bindings; + +use core::ops::Range; + +use kernel::{ + dma::CoherentAllocation, + fmt, + prelude::*, + ptr::{ + Alignable, + Alignment, // + }, + sizes::{ + SZ_128K, + SZ_1M, // + }, + transmute::{ + AsBytes, + FromBytes, // + }, +}; + +use crate::{ + fb::FbLayout, + firmware::gsp::GspFirmware, + gpu::Chipset, + gsp::{ + cmdq::Cmdq, // + GSP_PAGE_SIZE, + }, + num::{ + self, + FromSafeCast, // + }, +}; + +/// Empty type to group methods related to heap parameters for running the GSP firmware. +enum GspFwHeapParams {} + +/// Minimum required alignment for the GSP heap. +const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>(); + +impl GspFwHeapParams { + /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to + /// and including the first client subdevice allocation). + fn base_rm_size(_chipset: Chipset) -> u64 { + // TODO: this needs to be updated to return the correct value for Hopper+ once support for + // them is added: + // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100) + u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X) + } + + /// Returns the amount of heap memory required to support a single channel allocation. 
+ fn client_alloc_size() -> u64 { + u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE) + .align_up(GSP_HEAP_ALIGNMENT) + .unwrap_or(u64::MAX) + } + + /// Returns the amount of memory to reserve for management purposes for a framebuffer of size + /// `fb_size`. + fn management_overhead(fb_size: u64) -> u64 { + let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G)); + + u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB) + .saturating_mul(fb_size_gb) + .align_up(GSP_HEAP_ALIGNMENT) + .unwrap_or(u64::MAX) + } +} + +/// Heap memory requirements and constraints for a given version of the GSP LIBOS. +pub(crate) struct LibosParams { + /// The base amount of heap required by the GSP operating system, in bytes. + carveout_size: u64, + /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes. + allowed_heap_size: Range<u64>, +} + +impl LibosParams { + /// Version 2 of the GSP LIBOS (Turing and GA100) + const LIBOS2: LibosParams = LibosParams { + carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2), + allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB) + * num::usize_as_u64(SZ_1M) + ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB) + * num::usize_as_u64(SZ_1M), + }; + + /// Version 3 of the GSP LIBOS (GA102+) + const LIBOS3: LibosParams = LibosParams { + carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL), + allowed_heap_size: num::u32_as_u64( + bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB, + ) * num::usize_as_u64(SZ_1M) + ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB) + * num::usize_as_u64(SZ_1M), + }; + + /// Returns the libos parameters corresponding to `chipset`. 
+ pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams { + if chipset < Chipset::GA102 { + &Self::LIBOS2 + } else { + &Self::LIBOS3 + } + } + + /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size + /// of `fb_size` (in bytes) for `chipset`. + pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 { + // The WPR heap will contain the following: + // LIBOS carveout, + self.carveout_size + // RM boot working memory, + .saturating_add(GspFwHeapParams::base_rm_size(chipset)) + // One RM client, + .saturating_add(GspFwHeapParams::client_alloc_size()) + // Overhead for memory management. + .saturating_add(GspFwHeapParams::management_overhead(fb_size)) + // Clamp to the supported heap sizes. + .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1) + } +} + +/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA +/// addresses of the GSP bootloader and firmware. +#[repr(transparent)] +pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta); + +// SAFETY: Padding is explicit and does not contain uninitialized data. +unsafe impl AsBytes for GspFwWprMeta {} + +// SAFETY: This struct only contains integer types for which all bit patterns +// are valid. +unsafe impl FromBytes for GspFwWprMeta {} + +type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1; +type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1; + +impl GspFwWprMeta { + /// Fill in and return a `GspFwWprMeta` suitable for booting `gsp_firmware` using the + /// `fb_layout` layout. + pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self { + Self(bindings::GspFwWprMeta { + // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified. 
+ magic: r570_144::GSP_FW_WPR_META_MAGIC as u64, + revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION), + sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(), + sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size), + sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(), + sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()), + bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset), + bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset), + bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset), + __bindgen_anon_1: GspFwWprMetaBootResumeInfo { + __bindgen_anon_1: GspFwWprMetaBootInfo { + sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(), + sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()), + }, + }, + gspFwRsvdStart: fb_layout.heap.start, + nonWprHeapOffset: fb_layout.heap.start, + nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start, + gspFwWprStart: fb_layout.wpr2.start, + gspFwHeapOffset: fb_layout.wpr2_heap.start, + gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start, + gspFwOffset: fb_layout.elf.start, + bootBinOffset: fb_layout.boot.start, + frtsOffset: fb_layout.frts.start, + frtsSize: fb_layout.frts.end - fb_layout.frts.start, + gspFwWprEnd: fb_layout + .vga_workspace + .start + .align_down(Alignment::new::<SZ_128K>()), + gspFwHeapVfPartitionCount: fb_layout.vf_partition_count, + fbSize: fb_layout.fb.end - fb_layout.fb.start, + vgaWorkspaceOffset: fb_layout.vga_workspace.start, + vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start, + ..Default::default() + }) + } +} + +#[derive(Copy, Clone, Debug, PartialEq)] +#[repr(u32)] +pub(crate) enum MsgFunction { + // Common function codes + Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP, + SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO, + AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT, + AllocDevice = 
bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE, + AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY, + AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA, + AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA, + MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY, + BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA, + AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT, + Free = bindings::NV_VGPU_MSG_FUNCTION_FREE, + Log = bindings::NV_VGPU_MSG_FUNCTION_LOG, + GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, + SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY, + GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, + GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU, + GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, + GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO, + + // Event codes + GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE, + GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER, + PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT, + RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED, + MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED, + OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG, + GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, + GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, + UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT, +} + +impl fmt::Display for MsgFunction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + // Common function codes + MsgFunction::Nop => write!(f, "NOP"), + MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"), + MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"), + MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"), + MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"), + MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"), + 
MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"), + MsgFunction::MapMemory => write!(f, "MAP_MEMORY"), + MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"), + MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"), + MsgFunction::Free => write!(f, "FREE"), + MsgFunction::Log => write!(f, "LOG"), + MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"), + MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"), + MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"), + MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"), + MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"), + MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"), + + // Event codes + MsgFunction::GspInitDone => write!(f, "INIT_DONE"), + MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"), + MsgFunction::PostEvent => write!(f, "POST_EVENT"), + MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"), + MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"), + MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"), + MsgFunction::GspPostNoCat => write!(f, "NOCAT"), + MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"), + MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"), + } + } +} + +impl TryFrom<u32> for MsgFunction { + type Error = kernel::error::Error; + + fn try_from(value: u32) -> Result<MsgFunction> { + match value { + bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop), + bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => { + Ok(MsgFunction::SetGuestSystemInfo) + } + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot), + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice), + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory), + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma), + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma), + 
bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory), + bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma), + bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject), + bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free), + bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log), + bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo), + bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry), + bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo), + bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => { + Ok(MsgFunction::GspInitPostObjGpu) + } + bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl), + bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo), + bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone), + bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => { + Ok(MsgFunction::GspRunCpuSequencer) + } + bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent), + bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered), + bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued), + bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog), + bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat), + bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice), + bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint), + _ => Err(EINVAL), + } + } +} + +impl From<MsgFunction> for u32 { + fn from(value: MsgFunction) -> Self { + // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly. + value as u32 + } +} + +/// Sequencer buffer opcode for GSP sequencer commands. 
+#[derive(Copy, Clone, Debug, PartialEq)] +#[repr(u32)] +pub(crate) enum SeqBufOpcode { + // Core operation opcodes + CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET, + CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME, + CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START, + CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT, + + // Delay opcode + DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US, + + // Register operation opcodes + RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY, + RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL, + RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE, + RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE, +} + +impl fmt::Display for SeqBufOpcode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"), + SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"), + SeqBufOpcode::CoreStart => write!(f, "CORE_START"), + SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"), + SeqBufOpcode::DelayUs => write!(f, "DELAY_US"), + SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"), + SeqBufOpcode::RegPoll => write!(f, "REG_POLL"), + SeqBufOpcode::RegStore => write!(f, "REG_STORE"), + SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"), + } + } +} + +impl TryFrom<u32> for SeqBufOpcode { + type Error = kernel::error::Error; + + fn try_from(value: u32) -> Result<SeqBufOpcode> { + match value { + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => { + Ok(SeqBufOpcode::CoreReset) + } + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => { + Ok(SeqBufOpcode::CoreResume) + } + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => { + Ok(SeqBufOpcode::CoreStart) + } + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => { + 
Ok(SeqBufOpcode::CoreWaitForHalt) + } + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs), + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => { + Ok(SeqBufOpcode::RegModify) + } + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll), + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore), + r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite), + _ => Err(EINVAL), + } + } +} + +impl From<SeqBufOpcode> for u32 { + fn from(value: SeqBufOpcode) -> Self { + // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly. + value as u32 + } +} + +/// Wrapper for GSP sequencer register write payload. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE); + +impl RegWritePayload { + /// Returns the register address. + pub(crate) fn addr(&self) -> u32 { + self.0.addr + } + + /// Returns the value to write. + pub(crate) fn val(&self) -> u32 { + self.0.val + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for RegWritePayload {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for RegWritePayload {} + +/// Wrapper for GSP sequencer register modify payload. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY); + +impl RegModifyPayload { + /// Returns the register address. + pub(crate) fn addr(&self) -> u32 { + self.0.addr + } + + /// Returns the mask to apply. + pub(crate) fn mask(&self) -> u32 { + self.0.mask + } + + /// Returns the value to write. + pub(crate) fn val(&self) -> u32 { + self.0.val + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. 
+unsafe impl FromBytes for RegModifyPayload {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for RegModifyPayload {} + +/// Wrapper for GSP sequencer register poll payload. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL); + +impl RegPollPayload { + /// Returns the register address. + pub(crate) fn addr(&self) -> u32 { + self.0.addr + } + + /// Returns the mask to apply. + pub(crate) fn mask(&self) -> u32 { + self.0.mask + } + + /// Returns the expected value. + pub(crate) fn val(&self) -> u32 { + self.0.val + } + + /// Returns the timeout in microseconds. + pub(crate) fn timeout(&self) -> u32 { + self.0.timeout + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for RegPollPayload {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for RegPollPayload {} + +/// Wrapper for GSP sequencer delay payload. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US); + +impl DelayUsPayload { + /// Returns the delay value in microseconds. + pub(crate) fn val(&self) -> u32 { + self.0.val + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for DelayUsPayload {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for DelayUsPayload {} + +/// Wrapper for GSP sequencer register store payload. +#[repr(transparent)] +#[derive(Copy, Clone)] +pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE); + +impl RegStorePayload { + /// Returns the register address. + pub(crate) fn addr(&self) -> u32 { + self.0.addr + } + + /// Returns the storage index. 
+ #[allow(unused)] + pub(crate) fn index(&self) -> u32 { + self.0.index + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for RegStorePayload {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for RegStorePayload {} + +/// Wrapper for GSP sequencer buffer command. +#[repr(transparent)] +pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD); + +impl SequencerBufferCmd { + /// Returns the opcode as a `SeqBufOpcode` enum, or error if invalid. + pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> { + self.0.opCode.try_into() + } + + /// Returns the register write payload by value. + /// + /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`. + pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> { + if self.opcode()? != SeqBufOpcode::RegWrite { + return Err(EINVAL); + } + // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`. + let payload_bytes = unsafe { + core::slice::from_raw_parts( + core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(), + core::mem::size_of::<RegWritePayload>(), + ) + }; + Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?) + } + + /// Returns the register modify payload by value. + /// + /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`. + pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> { + if self.opcode()? != SeqBufOpcode::RegModify { + return Err(EINVAL); + } + // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`. + let payload_bytes = unsafe { + core::slice::from_raw_parts( + core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(), + core::mem::size_of::<RegModifyPayload>(), + ) + }; + Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) + } + + /// Returns the register poll payload by value. 
+ /// + /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`. + pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> { + if self.opcode()? != SeqBufOpcode::RegPoll { + return Err(EINVAL); + } + // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`. + let payload_bytes = unsafe { + core::slice::from_raw_parts( + core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(), + core::mem::size_of::<RegPollPayload>(), + ) + }; + Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) + } + + /// Returns the delay payload by value. + /// + /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`. + pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> { + if self.opcode()? != SeqBufOpcode::DelayUs { + return Err(EINVAL); + } + // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`. + let payload_bytes = unsafe { + core::slice::from_raw_parts( + core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(), + core::mem::size_of::<DelayUsPayload>(), + ) + }; + Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?) + } + + /// Returns the register store payload by value. + /// + /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`. + pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> { + if self.opcode()? != SeqBufOpcode::RegStore { + return Err(EINVAL); + } + // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`. + let payload_bytes = unsafe { + core::slice::from_raw_parts( + core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(), + core::mem::size_of::<RegStorePayload>(), + ) + }; + Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?) + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for SequencerBufferCmd {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. 
+unsafe impl AsBytes for SequencerBufferCmd {} + +/// Wrapper for GSP run CPU sequencer RPC. +#[repr(transparent)] +pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00); + +impl RunCpuSequencer { + /// Returns the command index. + pub(crate) fn cmd_index(&self) -> u32 { + self.0.cmdIndex + } +} + +// SAFETY: This struct only contains integer types for which all bit patterns are valid. +unsafe impl FromBytes for RunCpuSequencer {} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for RunCpuSequencer {} + +/// Struct containing the arguments required to pass a memory buffer to the GSP +/// for use during initialisation. +/// +/// The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is +/// configured for a larger page size (e.g. 64K pages), we need to give +/// the GSP an array of 4K pages. Since we only create physically contiguous +/// buffers the math to calculate the addresses is simple. +/// +/// The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently +/// ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the +/// buffers to be physically contiguous anyway. +/// +/// The memory allocated for the arguments must remain until the GSP sends the +/// init_done RPC. +#[repr(transparent)] +pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument); + +// SAFETY: Padding is explicit and does not contain uninitialized data. +unsafe impl AsBytes for LibosMemoryRegionInitArgument {} + +// SAFETY: This struct only contains integer types for which all bit patterns +// are valid. +unsafe impl FromBytes for LibosMemoryRegionInitArgument {} + +impl LibosMemoryRegionInitArgument { + pub(crate) fn new<A: AsBytes + FromBytes>( + name: &'static str, + obj: &CoherentAllocation<A>, + ) -> Self { + /// Generates the `ID8` identifier required for some GSP objects. 
+ fn id8(name: &str) -> u64 { + let mut bytes = [0u8; core::mem::size_of::<u64>()]; + + for (c, b) in name.bytes().rev().zip(&mut bytes) { + *b = c; + } + + u64::from_ne_bytes(bytes) + } + + Self(bindings::LibosMemoryRegionInitArgument { + id8: id8(name), + pa: obj.dma_handle(), + size: num::usize_as_u64(obj.size()), + kind: num::u32_into_u8::< + { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS }, + >(), + loc: num::u32_into_u8::< + { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM }, + >(), + ..Default::default() + }) + } +} + +/// TX header for setting up a message queue with the GSP. +#[repr(transparent)] +pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader); + +impl MsgqTxHeader { + /// Create a new TX queue header. + /// + /// # Arguments + /// + /// * `msgq_size` - Total size of the message queue structure, in bytes. + /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue + /// structure. + /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages + /// allocated for the message queue in the message queue structure. + pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self { + Self(bindings::msgqTxHeader { + version: 0, + size: msgq_size, + msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(), + msgCount: msg_count, + writePtr: 0, + flags: 1, + rxHdrOff: rx_hdr_offset, + entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(), + }) + } + + /// Returns the value of the write pointer for this queue. + pub(crate) fn write_ptr(&self) -> u32 { + let ptr = core::ptr::from_ref(&self.0.writePtr); + + // SAFETY: `ptr` is a valid pointer to a `u32`. + unsafe { ptr.read_volatile() } + } + + /// Sets the value of the write pointer for this queue. + pub(crate) fn set_write_ptr(&mut self, val: u32) { + let ptr = core::ptr::from_mut(&mut self.0.writePtr); + + // SAFETY: `ptr` is a valid pointer to a `u32`. 
+ unsafe { ptr.write_volatile(val) } + } +} + +// SAFETY: Padding is explicit and does not contain uninitialized data. +unsafe impl AsBytes for MsgqTxHeader {} + +/// RX header for setting up a message queue with the GSP. +#[repr(transparent)] +pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader); + +/// Header for the message RX queue. +impl MsgqRxHeader { + /// Creates a new RX queue header. + pub(crate) fn new() -> Self { + Self(Default::default()) + } + + /// Returns the value of the read pointer for this queue. + pub(crate) fn read_ptr(&self) -> u32 { + let ptr = core::ptr::from_ref(&self.0.readPtr); + + // SAFETY: `ptr` is a valid pointer to a `u32`. + unsafe { ptr.read_volatile() } + } + + /// Sets the value of the read pointer for this queue. + pub(crate) fn set_read_ptr(&mut self, val: u32) { + let ptr = core::ptr::from_mut(&mut self.0.readPtr); + + // SAFETY: `ptr` is a valid pointer to a `u32`. + unsafe { ptr.write_volatile(val) } + } +} + +// SAFETY: Padding is explicit and does not contain uninitialized data. +unsafe impl AsBytes for MsgqRxHeader {} + +bitfield! 
{ + struct MsgHeaderVersion(u32) { + 31:24 major as u8; + 23:16 minor as u8; + } +} + +impl MsgHeaderVersion { + const MAJOR_TOT: u8 = 3; + const MINOR_TOT: u8 = 0; + + fn new() -> Self { + Self::default() + .set_major(Self::MAJOR_TOT) + .set_minor(Self::MINOR_TOT) + } +} + +impl bindings::rpc_message_header_v { + fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> { + type RpcMessageHeader = bindings::rpc_message_header_v; + + try_init!(RpcMessageHeader { + header_version: MsgHeaderVersion::new().into(), + signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID, + function: function.into(), + length: size_of::<Self>() + .checked_add(cmd_size) + .ok_or(EOVERFLOW) + .and_then(|v| v.try_into().map_err(|_| EINVAL))?, + rpc_result: 0xffffffff, + rpc_result_private: 0xffffffff, + ..Zeroable::init_zeroed() + }) + } +} + +// SAFETY: We can't derive the Zeroable trait for this binding because the +// procedural macro doesn't support the syntax used by bindgen to create the +// __IncompleteArrayField types. So instead we implement it here, which is safe +// because these are explicitly padded structures only containing types for +// which any bit pattern, including all zeros, is valid. +unsafe impl Zeroable for bindings::rpc_message_header_v {} + +/// GSP Message Element. +/// +/// This is essentially a message header expected to be followed by the message data. +#[repr(transparent)] +pub(crate) struct GspMsgElement { + inner: bindings::GSP_MSG_QUEUE_ELEMENT, +} + +impl GspMsgElement { + /// Creates a new message element. + /// + /// # Arguments + /// + /// * `sequence` - Sequence number of the message. + /// * `cmd_size` - Size of the command (not including the message element), in bytes. + /// * `function` - Function of the message. 
+    #[allow(non_snake_case)]
+    pub(crate) fn init(
+        sequence: u32,
+        cmd_size: usize,
+        function: MsgFunction,
+    ) -> impl Init<Self, Error> {
+        type RpcMessageHeader = bindings::rpc_message_header_v;
+        type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
+        let init_inner = try_init!(InnerGspMsgElement {
+            seqNum: sequence,
+            elemCount: size_of::<Self>()
+                .checked_add(cmd_size)
+                .ok_or(EOVERFLOW)?
+                .div_ceil(GSP_PAGE_SIZE)
+                .try_into()
+                .map_err(|_| EOVERFLOW)?,
+            rpc <- RpcMessageHeader::init(cmd_size, function),
+            ..Zeroable::init_zeroed()
+        });
+
+        try_init!(GspMsgElement {
+            inner <- init_inner,
+        })
+    }
+
+    /// Sets the checksum of this message.
+    ///
+    /// Since the header is also part of the checksum, this is usually called after the whole
+    /// message has been written to the shared memory area.
+    pub(crate) fn set_checksum(&mut self, checksum: u32) {
+        self.inner.checkSum = checksum;
+    }
+
+    /// Returns the total length of the message.
+    pub(crate) fn length(&self) -> usize {
+        // `rpc.length` includes the length of the GspRpcHeader but not the message header.
+        size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
+            + num::u32_as_usize(self.inner.rpc.length)
+    }
+
+    /// Returns the sequence number of the message.
+    pub(crate) fn sequence(&self) -> u32 {
+        self.inner.rpc.sequence
+    }
+
+    /// Returns the function of the message, if it is valid, or the invalid function number as an
+    /// error.
+    pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
+        self.inner
+            .rpc
+            .function
+            .try_into()
+            .map_err(|_| self.inner.rpc.function)
+    }
+
+    /// Returns the number of elements (i.e. memory pages) used by this message.
+    pub(crate) fn element_count(&self) -> u32 {
+        self.inner.elemCount
+    }
+}
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for GspMsgElement {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for GspMsgElement {} + +/// Arguments for GSP startup. +#[repr(transparent)] +pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED); + +impl GspArgumentsCached { + /// Creates the arguments for starting the GSP up using `cmdq` as its command queue. + pub(crate) fn new(cmdq: &Cmdq) -> Self { + Self(bindings::GSP_ARGUMENTS_CACHED { + messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0, + bDmemStack: 1, + ..Default::default() + }) + } +} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for GspArgumentsCached {} + +// SAFETY: This struct only contains integer types for which all bit patterns +// are valid. +unsafe impl FromBytes for GspArgumentsCached {} + +/// Init arguments for the message queue. +#[repr(transparent)] +struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS); + +impl MessageQueueInitArguments { + /// Creates a new init arguments structure for `cmdq`. + fn new(cmdq: &Cmdq) -> Self { + Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS { + sharedMemPhysAddr: cmdq.dma_handle(), + pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(), + cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET), + statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET), + ..Default::default() + }) + } +} diff --git a/drivers/gpu/nova-core/gsp/fw/commands.rs b/drivers/gpu/nova-core/gsp/fw/commands.rs new file mode 100644 index 000000000000..21be44199693 --- /dev/null +++ b/drivers/gpu/nova-core/gsp/fw/commands.rs @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 + +use kernel::prelude::*; +use kernel::transmute::{AsBytes, FromBytes}; +use kernel::{device, pci}; + +use crate::gsp::GSP_PAGE_SIZE; + +use super::bindings; + +/// Payload of the `GspSetSystemInfo` command. 
+#[repr(transparent)] +pub(crate) struct GspSetSystemInfo { + inner: bindings::GspSystemInfo, +} +static_assert!(size_of::<GspSetSystemInfo>() < GSP_PAGE_SIZE); + +impl GspSetSystemInfo { + /// Returns an in-place initializer for the `GspSetSystemInfo` command. + #[allow(non_snake_case)] + pub(crate) fn init<'a>(dev: &'a pci::Device<device::Bound>) -> impl Init<Self, Error> + 'a { + type InnerGspSystemInfo = bindings::GspSystemInfo; + let init_inner = try_init!(InnerGspSystemInfo { + gpuPhysAddr: dev.resource_start(0)?, + gpuPhysFbAddr: dev.resource_start(1)?, + gpuPhysInstAddr: dev.resource_start(3)?, + nvDomainBusDeviceFunc: u64::from(dev.dev_id()), + + // Using TASK_SIZE in r535_gsp_rpc_set_system_info() seems wrong because + // TASK_SIZE is per-task. That's probably a design issue in GSP-RM though. + maxUserVa: (1 << 47) - 4096, + pciConfigMirrorBase: 0x088000, + pciConfigMirrorSize: 0x001000, + + PCIDeviceID: (u32::from(dev.device_id()) << 16) | u32::from(dev.vendor_id().as_raw()), + PCISubDeviceID: (u32::from(dev.subsystem_device_id()) << 16) + | u32::from(dev.subsystem_vendor_id()), + PCIRevisionID: u32::from(dev.revision_id()), + bIsPrimary: 0, + bPreserveVideoMemoryAllocations: 0, + ..Zeroable::init_zeroed() + }); + + try_init!(GspSetSystemInfo { + inner <- init_inner, + }) + } +} + +// SAFETY: These structs don't meet the no-padding requirements of AsBytes but +// that is not a problem because they are not used outside the kernel. +unsafe impl AsBytes for GspSetSystemInfo {} + +// SAFETY: These structs don't meet the no-padding requirements of FromBytes but +// that is not a problem because they are not used outside the kernel. 
+unsafe impl FromBytes for GspSetSystemInfo {} + +#[repr(transparent)] +pub(crate) struct PackedRegistryEntry(bindings::PACKED_REGISTRY_ENTRY); + +impl PackedRegistryEntry { + pub(crate) fn new(offset: u32, value: u32) -> Self { + Self({ + bindings::PACKED_REGISTRY_ENTRY { + nameOffset: offset, + + // We only support DWORD types for now. Support for other types + // will come later if required. + type_: bindings::REGISTRY_TABLE_ENTRY_TYPE_DWORD as u8, + __bindgen_padding_0: Default::default(), + data: value, + length: 0, + } + }) + } +} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for PackedRegistryEntry {} + +/// Payload of the `SetRegistry` command. +#[repr(transparent)] +pub(crate) struct PackedRegistryTable { + inner: bindings::PACKED_REGISTRY_TABLE, +} + +impl PackedRegistryTable { + #[allow(non_snake_case)] + pub(crate) fn init(num_entries: u32, size: u32) -> impl Init<Self> { + type InnerPackedRegistryTable = bindings::PACKED_REGISTRY_TABLE; + let init_inner = init!(InnerPackedRegistryTable { + numEntries: num_entries, + size, + entries: Default::default() + }); + + init!(PackedRegistryTable { inner <- init_inner }) + } +} + +// SAFETY: Padding is explicit and will not contain uninitialized data. +unsafe impl AsBytes for PackedRegistryTable {} + +// SAFETY: This struct only contains integer types for which all bit patterns +// are valid. +unsafe impl FromBytes for PackedRegistryTable {} + +/// Payload of the `GetGspStaticInfo` command and message. +#[repr(transparent)] +pub(crate) struct GspStaticConfigInfo(bindings::GspStaticConfigInfo_t); + +impl GspStaticConfigInfo { + /// Returns a bytes array containing the (hopefully) zero-terminated name of this GPU. + pub(crate) fn gpu_name_str(&self) -> [u8; 64] { + self.0.gpuNameString + } +} + +// SAFETY: Padding is explicit and will not contain uninitialized data. 
+unsafe impl AsBytes for GspStaticConfigInfo {} + +// SAFETY: This struct only contains integer types for which all bit patterns +// are valid. +unsafe impl FromBytes for GspStaticConfigInfo {} + +// SAFETY: This struct only contains integer types and fixed-size arrays for which +// all bit patterns are valid. +unsafe impl Zeroable for GspStaticConfigInfo {} diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs index 35cb0370a7c9..048234d1a9d1 100644 --- a/drivers/gpu/nova-core/gsp/fw/r570_144.rs +++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs @@ -12,7 +12,6 @@ #![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))] #![allow( dead_code, - unused_imports, clippy::all, clippy::undocumented_unsafe_blocks, clippy::ptr_as_ptr, @@ -25,5 +24,8 @@ unreachable_pub, unsafe_op_in_unsafe_fn )] -use kernel::ffi; +use kernel::{ + ffi, + prelude::Zeroable, // +}; include!("r570_144/bindings.rs"); diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs index cec594032515..5bcfbcd1ad22 100644 --- a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs +++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs @@ -1 +1,951 @@ // SPDX-License-Identifier: GPL-2.0 + +#[repr(C)] +#[derive(Default)] +pub struct __IncompleteArrayField<T>(::core::marker::PhantomData<T>, [T; 0]); +impl<T> __IncompleteArrayField<T> { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::core::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::core::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl<T> ::core::fmt::Debug for __IncompleteArrayField<T> 
{ + fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub const NV_VGPU_MSG_SIGNATURE_VALID: u32 = 1129337430; +pub const GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2: u32 = 0; +pub const GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL: u32 = 23068672; +pub const GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X: u32 = 8388608; +pub const GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB: u32 = 98304; +pub const GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE: u32 = 100663296; +pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB: u32 = 64; +pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB: u32 = 256; +pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB: u32 = 88; +pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB: u32 = 280; +pub const GSP_FW_WPR_META_REVISION: u32 = 1; +pub const GSP_FW_WPR_META_MAGIC: i64 = -2577556379034558285; +pub const REGISTRY_TABLE_ENTRY_TYPE_DWORD: u32 = 1; +pub type __u8 = ffi::c_uchar; +pub type __u16 = ffi::c_ushort; +pub type __u32 = ffi::c_uint; +pub type __u64 = ffi::c_ulonglong; +pub type u8_ = __u8; +pub type u16_ = __u16; +pub type u32_ = __u32; +pub type u64_ = __u64; +pub const NV_VGPU_MSG_FUNCTION_NOP: _bindgen_ty_2 = 0; +pub const NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO: _bindgen_ty_2 = 1; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_ROOT: _bindgen_ty_2 = 2; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE: _bindgen_ty_2 = 3; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY: _bindgen_ty_2 = 4; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA: _bindgen_ty_2 = 5; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA: _bindgen_ty_2 = 6; +pub const NV_VGPU_MSG_FUNCTION_MAP_MEMORY: _bindgen_ty_2 = 7; +pub const NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA: _bindgen_ty_2 = 8; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT: _bindgen_ty_2 = 9; +pub const NV_VGPU_MSG_FUNCTION_FREE: _bindgen_ty_2 = 10; +pub const NV_VGPU_MSG_FUNCTION_LOG: _bindgen_ty_2 = 11; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_VIDMEM: _bindgen_ty_2 = 12; +pub const 
NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY: _bindgen_ty_2 = 13; +pub const NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA: _bindgen_ty_2 = 14; +pub const NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA: _bindgen_ty_2 = 15; +pub const NV_VGPU_MSG_FUNCTION_GET_EDID: _bindgen_ty_2 = 16; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_DISP_CHANNEL: _bindgen_ty_2 = 17; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_DISP_OBJECT: _bindgen_ty_2 = 18; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_SUBDEVICE: _bindgen_ty_2 = 19; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_DYNAMIC_MEMORY: _bindgen_ty_2 = 20; +pub const NV_VGPU_MSG_FUNCTION_DUP_OBJECT: _bindgen_ty_2 = 21; +pub const NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS: _bindgen_ty_2 = 22; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_EVENT: _bindgen_ty_2 = 23; +pub const NV_VGPU_MSG_FUNCTION_SEND_EVENT: _bindgen_ty_2 = 24; +pub const NV_VGPU_MSG_FUNCTION_REMAPPER_CONTROL: _bindgen_ty_2 = 25; +pub const NV_VGPU_MSG_FUNCTION_DMA_CONTROL: _bindgen_ty_2 = 26; +pub const NV_VGPU_MSG_FUNCTION_DMA_FILL_PTE_MEM: _bindgen_ty_2 = 27; +pub const NV_VGPU_MSG_FUNCTION_MANAGE_HW_RESOURCE: _bindgen_ty_2 = 28; +pub const NV_VGPU_MSG_FUNCTION_BIND_ARBITRARY_CTX_DMA: _bindgen_ty_2 = 29; +pub const NV_VGPU_MSG_FUNCTION_CREATE_FB_SEGMENT: _bindgen_ty_2 = 30; +pub const NV_VGPU_MSG_FUNCTION_DESTROY_FB_SEGMENT: _bindgen_ty_2 = 31; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_SHARE_DEVICE: _bindgen_ty_2 = 32; +pub const NV_VGPU_MSG_FUNCTION_DEFERRED_API_CONTROL: _bindgen_ty_2 = 33; +pub const NV_VGPU_MSG_FUNCTION_REMOVE_DEFERRED_API: _bindgen_ty_2 = 34; +pub const NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_READ: _bindgen_ty_2 = 35; +pub const NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_WRITE: _bindgen_ty_2 = 36; +pub const NV_VGPU_MSG_FUNCTION_SIM_MANAGE_DISPLAY_CONTEXT_DMA: _bindgen_ty_2 = 37; +pub const NV_VGPU_MSG_FUNCTION_FREE_VIDMEM_VIRT: _bindgen_ty_2 = 38; +pub const NV_VGPU_MSG_FUNCTION_PERF_GET_PSTATE_INFO: _bindgen_ty_2 = 39; +pub const NV_VGPU_MSG_FUNCTION_PERF_GET_PERFMON_SAMPLE: _bindgen_ty_2 = 40; +pub const 
NV_VGPU_MSG_FUNCTION_PERF_GET_VIRTUAL_PSTATE_INFO: _bindgen_ty_2 = 41; +pub const NV_VGPU_MSG_FUNCTION_PERF_GET_LEVEL_INFO: _bindgen_ty_2 = 42; +pub const NV_VGPU_MSG_FUNCTION_MAP_SEMA_MEMORY: _bindgen_ty_2 = 43; +pub const NV_VGPU_MSG_FUNCTION_UNMAP_SEMA_MEMORY: _bindgen_ty_2 = 44; +pub const NV_VGPU_MSG_FUNCTION_SET_SURFACE_PROPERTIES: _bindgen_ty_2 = 45; +pub const NV_VGPU_MSG_FUNCTION_CLEANUP_SURFACE: _bindgen_ty_2 = 46; +pub const NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER: _bindgen_ty_2 = 47; +pub const NV_VGPU_MSG_FUNCTION_TDR_SET_TIMEOUT_STATE: _bindgen_ty_2 = 48; +pub const NV_VGPU_MSG_FUNCTION_SWITCH_TO_VGA: _bindgen_ty_2 = 49; +pub const NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS: _bindgen_ty_2 = 50; +pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO: _bindgen_ty_2 = 51; +pub const NV_VGPU_MSG_FUNCTION_ALLOC_VIRTMEM: _bindgen_ty_2 = 52; +pub const NV_VGPU_MSG_FUNCTION_UPDATE_PDE_2: _bindgen_ty_2 = 53; +pub const NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY: _bindgen_ty_2 = 54; +pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_PSTATE_INFO: _bindgen_ty_2 = 55; +pub const NV_VGPU_MSG_FUNCTION_TRANSLATE_GUEST_GPU_PTES: _bindgen_ty_2 = 56; +pub const NV_VGPU_MSG_FUNCTION_RESERVED_57: _bindgen_ty_2 = 57; +pub const NV_VGPU_MSG_FUNCTION_RESET_CURRENT_GR_CONTEXT: _bindgen_ty_2 = 58; +pub const NV_VGPU_MSG_FUNCTION_SET_SEMA_MEM_VALIDATION_STATE: _bindgen_ty_2 = 59; +pub const NV_VGPU_MSG_FUNCTION_GET_ENGINE_UTILIZATION: _bindgen_ty_2 = 60; +pub const NV_VGPU_MSG_FUNCTION_UPDATE_GPU_PDES: _bindgen_ty_2 = 61; +pub const NV_VGPU_MSG_FUNCTION_GET_ENCODER_CAPACITY: _bindgen_ty_2 = 62; +pub const NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32: _bindgen_ty_2 = 63; +pub const NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO_EXT: _bindgen_ty_2 = 64; +pub const NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO: _bindgen_ty_2 = 65; +pub const NV_VGPU_MSG_FUNCTION_RMFS_INIT: _bindgen_ty_2 = 66; +pub const NV_VGPU_MSG_FUNCTION_RMFS_CLOSE_QUEUE: _bindgen_ty_2 = 67; +pub const NV_VGPU_MSG_FUNCTION_RMFS_CLEANUP: 
_bindgen_ty_2 = 68; +pub const NV_VGPU_MSG_FUNCTION_RMFS_TEST: _bindgen_ty_2 = 69; +pub const NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE: _bindgen_ty_2 = 70; +pub const NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD: _bindgen_ty_2 = 71; +pub const NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO: _bindgen_ty_2 = 72; +pub const NV_VGPU_MSG_FUNCTION_SET_REGISTRY: _bindgen_ty_2 = 73; +pub const NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU: _bindgen_ty_2 = 74; +pub const NV_VGPU_MSG_FUNCTION_SUBDEV_EVENT_SET_NOTIFICATION: _bindgen_ty_2 = 75; +pub const NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL: _bindgen_ty_2 = 76; +pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO2: _bindgen_ty_2 = 77; +pub const NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT: _bindgen_ty_2 = 78; +pub const NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY: _bindgen_ty_2 = 79; +pub const NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_STATIC_INFO: _bindgen_ty_2 = 80; +pub const NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_FAULT_BUFFER: _bindgen_ty_2 = 81; +pub const NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_FAULT_BUFFER: _bindgen_ty_2 = 82; +pub const NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER: _bindgen_ty_2 = 83; +pub const NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER: _bindgen_ty_2 = 84; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_VGPU_FB_USAGE: _bindgen_ty_2 = 85; +pub const NV_VGPU_MSG_FUNCTION_CTRL_NVFBC_SW_SESSION_UPDATE_INFO: _bindgen_ty_2 = 86; +pub const NV_VGPU_MSG_FUNCTION_CTRL_NVENC_SW_SESSION_UPDATE_INFO: _bindgen_ty_2 = 87; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESET_CHANNEL: _bindgen_ty_2 = 88; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESET_ISOLATED_CHANNEL: _bindgen_ty_2 = 89; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_HANDLE_VF_PRI_FAULT: _bindgen_ty_2 = 90; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CLK_GET_EXTENDED_INFO: _bindgen_ty_2 = 91; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_BOOST: _bindgen_ty_2 = 92; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_VPSTATES_GET_CONTROL: _bindgen_ty_2 = 93; +pub const 
NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE: _bindgen_ty_2 = 94; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_COLOR_CLEAR: _bindgen_ty_2 = 95; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_DEPTH_CLEAR: _bindgen_ty_2 = 96; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SCHEDULE: _bindgen_ty_2 = 97; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_TIMESLICE: _bindgen_ty_2 = 98; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PREEMPT: _bindgen_ty_2 = 99; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_DISABLE_CHANNELS: _bindgen_ty_2 = 100; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_TSG_INTERLEAVE_LEVEL: _bindgen_ty_2 = 101; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_CHANNEL_INTERLEAVE_LEVEL: _bindgen_ty_2 = 102; +pub const NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC: _bindgen_ty_2 = 103; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_V2: _bindgen_ty_2 = 104; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_AES_ENCRYPT: _bindgen_ty_2 = 105; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY: _bindgen_ty_2 = 106; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY_STATUS: _bindgen_ty_2 = 107; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES: _bindgen_ty_2 = 108; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_ALL_SM_ERROR_STATES: _bindgen_ty_2 = 109; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_EXCEPTION_MASK: _bindgen_ty_2 = 110; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_PROMOTE_CTX: _bindgen_ty_2 = 111; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_PREEMPTION_BIND: _bindgen_ty_2 = 112; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_CTXSW_PREEMPTION_MODE: _bindgen_ty_2 = 113; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_ZCULL_BIND: _bindgen_ty_2 = 114; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_INITIALIZE_CTX: _bindgen_ty_2 = 115; +pub const NV_VGPU_MSG_FUNCTION_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES: _bindgen_ty_2 = 116; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_CLEAR_FAULTED_BIT: _bindgen_ty_2 = 117; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_LATEST_ECC_ADDRESSES: _bindgen_ty_2 = 118; +pub 
const NV_VGPU_MSG_FUNCTION_CTRL_MC_SERVICE_INTERRUPTS: _bindgen_ty_2 = 119; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DMA_SET_DEFAULT_VASPACE: _bindgen_ty_2 = 120; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_CE_PCE_MASK: _bindgen_ty_2 = 121; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY: _bindgen_ty_2 = 122; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_PEER_ID_MASK: _bindgen_ty_2 = 123; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_STATUS: _bindgen_ty_2 = 124; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS: _bindgen_ty_2 = 125; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_MATRIX: _bindgen_ty_2 = 126; +pub const NV_VGPU_MSG_FUNCTION_RESERVED_0: _bindgen_ty_2 = 127; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_PM_AREA_SMPC: _bindgen_ty_2 = 128; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HWPM_LEGACY: _bindgen_ty_2 = 129; +pub const NV_VGPU_MSG_FUNCTION_CTRL_B0CC_EXEC_REG_OPS: _bindgen_ty_2 = 130; +pub const NV_VGPU_MSG_FUNCTION_CTRL_BIND_PM_RESOURCES: _bindgen_ty_2 = 131; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SUSPEND_CONTEXT: _bindgen_ty_2 = 132; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_RESUME_CONTEXT: _bindgen_ty_2 = 133; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_EXEC_REG_OPS: _bindgen_ty_2 = 134; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_DEBUG: _bindgen_ty_2 = 135; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_SINGLE_SM_ERROR_STATE: _bindgen_ty_2 = 136; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE: _bindgen_ty_2 = 137; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_ERRBAR_DEBUG: _bindgen_ty_2 = 138; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE: _bindgen_ty_2 = 139; +pub const NV_VGPU_MSG_FUNCTION_CTRL_ALLOC_PMA_STREAM: _bindgen_ty_2 = 140; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PMA_STREAM_UPDATE_GET_PUT: _bindgen_ty_2 = 141; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_INFO_V2: _bindgen_ty_2 = 142; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SET_CHANNEL_PROPERTIES: _bindgen_ty_2 = 
143; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_CTX_BUFFER_INFO: _bindgen_ty_2 = 144; +pub const NV_VGPU_MSG_FUNCTION_CTRL_KGR_GET_CTX_BUFFER_PTES: _bindgen_ty_2 = 145; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_EVICT_CTX: _bindgen_ty_2 = 146; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_FS_INFO: _bindgen_ty_2 = 147; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GRMGR_GET_GR_FS_INFO: _bindgen_ty_2 = 148; +pub const NV_VGPU_MSG_FUNCTION_CTRL_STOP_CHANNEL: _bindgen_ty_2 = 149; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_PC_SAMPLING_MODE: _bindgen_ty_2 = 150; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_GET_STATUS: _bindgen_ty_2 = 151; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_SET_CONTROL: _bindgen_ty_2 = 152; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FREE_PMA_STREAM: _bindgen_ty_2 = 153; +pub const NV_VGPU_MSG_FUNCTION_CTRL_TIMER_SET_GR_TICK_FREQ: _bindgen_ty_2 = 154; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB: _bindgen_ty_2 = 155; +pub const NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_GR_STATIC_INFO: _bindgen_ty_2 = 156; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP: _bindgen_ty_2 = 157; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_TPC_PARTITION_MODE: _bindgen_ty_2 = 158; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_TPC_PARTITION_MODE: _bindgen_ty_2 = 159; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_ALLOCATE: _bindgen_ty_2 = 160; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_DESTROY: _bindgen_ty_2 = 161; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_MAP: _bindgen_ty_2 = 162; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_UNMAP: _bindgen_ty_2 = 163; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_PUSH_STREAM: _bindgen_ty_2 = 164; +pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_SET_HANDLES: _bindgen_ty_2 = 165; +pub const NV_VGPU_MSG_FUNCTION_UVM_METHOD_STREAM_GUEST_PAGES_OPERATION: _bindgen_ty_2 = 166; +pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL: _bindgen_ty_2 = 167; +pub const 
NV_VGPU_MSG_FUNCTION_DCE_RM_INIT: _bindgen_ty_2 = 168; +pub const NV_VGPU_MSG_FUNCTION_REGISTER_VIRTUAL_EVENT_BUFFER: _bindgen_ty_2 = 169; +pub const NV_VGPU_MSG_FUNCTION_CTRL_EVENT_BUFFER_UPDATE_GET: _bindgen_ty_2 = 170; +pub const NV_VGPU_MSG_FUNCTION_GET_PLCABLE_ADDRESS_KIND: _bindgen_ty_2 = 171; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_LIMITS_SET_STATUS_V2: _bindgen_ty_2 = 172; +pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM: _bindgen_ty_2 = 173; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_MMU_DEBUG_MODE: _bindgen_ty_2 = 174; +pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS: _bindgen_ty_2 = 175; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_SIZE: _bindgen_ty_2 = 176; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_INFO: _bindgen_ty_2 = 177; +pub const NV_VGPU_MSG_FUNCTION_DISABLE_CHANNELS: _bindgen_ty_2 = 178; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEMORY_DESCRIBE: _bindgen_ty_2 = 179; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEM_STATS: _bindgen_ty_2 = 180; +pub const NV_VGPU_MSG_FUNCTION_SAVE_HIBERNATION_DATA: _bindgen_ty_2 = 181; +pub const NV_VGPU_MSG_FUNCTION_RESTORE_HIBERNATION_DATA: _bindgen_ty_2 = 182; +pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED: _bindgen_ty_2 = 183; +pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_CREATE: _bindgen_ty_2 = 184; +pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_DELETE: _bindgen_ty_2 = 185; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN: _bindgen_ty_2 = 186; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX: _bindgen_ty_2 = 187; +pub const NV_VGPU_MSG_FUNCTION_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION: _bindgen_ty_2 = + 188; +pub const NV_VGPU_MSG_FUNCTION_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK: + _bindgen_ty_2 = 189; +pub const NV_VGPU_MSG_FUNCTION_SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER: _bindgen_ty_2 = 190; +pub const 
NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_P2P_CAPS: _bindgen_ty_2 = 191; +pub const NV_VGPU_MSG_FUNCTION_CTRL_BUS_SET_P2P_MAPPING: _bindgen_ty_2 = 192; +pub const NV_VGPU_MSG_FUNCTION_CTRL_BUS_UNSET_P2P_MAPPING: _bindgen_ty_2 = 193; +pub const NV_VGPU_MSG_FUNCTION_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK: _bindgen_ty_2 = 194; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_MIGRATABLE_OPS: _bindgen_ty_2 = 195; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_TOTAL_HS_CREDITS: _bindgen_ty_2 = 196; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_HS_CREDITS: _bindgen_ty_2 = 197; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_HS_CREDITS: _bindgen_ty_2 = 198; +pub const NV_VGPU_MSG_FUNCTION_CTRL_PM_AREA_PC_SAMPLER: _bindgen_ty_2 = 199; +pub const NV_VGPU_MSG_FUNCTION_INVALIDATE_TLB: _bindgen_ty_2 = 200; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_QUERY_ECC_STATUS: _bindgen_ty_2 = 201; +pub const NV_VGPU_MSG_FUNCTION_ECC_NOTIFIER_WRITE_ACK: _bindgen_ty_2 = 202; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_DEBUG: _bindgen_ty_2 = 203; +pub const NV_VGPU_MSG_FUNCTION_RM_API_CONTROL: _bindgen_ty_2 = 204; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE: _bindgen_ty_2 = 205; +pub const NV_VGPU_MSG_FUNCTION_CTRL_NVLINK_GET_INBAND_RECEIVED_DATA: _bindgen_ty_2 = 206; +pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_DATA: _bindgen_ty_2 = 207; +pub const NV_VGPU_MSG_FUNCTION_RESERVED_208: _bindgen_ty_2 = 208; +pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_GET_INFO_V2: _bindgen_ty_2 = 209; +pub const NV_VGPU_MSG_FUNCTION_GET_BRAND_CAPS: _bindgen_ty_2 = 210; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_NVLINK_INBAND_SEND_DATA: _bindgen_ty_2 = 211; +pub const NV_VGPU_MSG_FUNCTION_UPDATE_GPM_GUEST_BUFFER_INFO: _bindgen_ty_2 = 212; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE: _bindgen_ty_2 = 213; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_STENCIL_CLEAR: _bindgen_ty_2 = 214; +pub const NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS: _bindgen_ty_2 = 215; +pub const 
NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS: _bindgen_ty_2 = 216; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_GCC_DEBUG: _bindgen_ty_2 = 217; +pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_GCC_DEBUG: _bindgen_ty_2 = 218; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HES: _bindgen_ty_2 = 219; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_HES: _bindgen_ty_2 = 220; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_CCU_PROF: _bindgen_ty_2 = 221; +pub const NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_CCU_PROF: _bindgen_ty_2 = 222; +pub const NV_VGPU_MSG_FUNCTION_RESERVED: _bindgen_ty_2 = 223; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL: _bindgen_ty_2 = 224; +pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_GET_HS_CREDITS_MAPPING: _bindgen_ty_2 = 225; +pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_EXPORT: _bindgen_ty_2 = 226; +pub const NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS: _bindgen_ty_2 = 227; +pub type _bindgen_ty_2 = ffi::c_uint; +pub const NV_VGPU_MSG_EVENT_FIRST_EVENT: _bindgen_ty_3 = 4096; +pub const NV_VGPU_MSG_EVENT_GSP_INIT_DONE: _bindgen_ty_3 = 4097; +pub const NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER: _bindgen_ty_3 = 4098; +pub const NV_VGPU_MSG_EVENT_POST_EVENT: _bindgen_ty_3 = 4099; +pub const NV_VGPU_MSG_EVENT_RC_TRIGGERED: _bindgen_ty_3 = 4100; +pub const NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED: _bindgen_ty_3 = 4101; +pub const NV_VGPU_MSG_EVENT_OS_ERROR_LOG: _bindgen_ty_3 = 4102; +pub const NV_VGPU_MSG_EVENT_RG_LINE_INTR: _bindgen_ty_3 = 4103; +pub const NV_VGPU_MSG_EVENT_GPUACCT_PERFMON_UTIL_SAMPLES: _bindgen_ty_3 = 4104; +pub const NV_VGPU_MSG_EVENT_SIM_READ: _bindgen_ty_3 = 4105; +pub const NV_VGPU_MSG_EVENT_SIM_WRITE: _bindgen_ty_3 = 4106; +pub const NV_VGPU_MSG_EVENT_SEMAPHORE_SCHEDULE_CALLBACK: _bindgen_ty_3 = 4107; +pub const NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT: _bindgen_ty_3 = 4108; +pub const NV_VGPU_MSG_EVENT_VGPU_GSP_PLUGIN_TRIGGERED: _bindgen_ty_3 = 4109; +pub const 
NV_VGPU_MSG_EVENT_PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK: _bindgen_ty_3 = 4110; +pub const NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE: _bindgen_ty_3 = 4111; +pub const NV_VGPU_MSG_EVENT_VGPU_CONFIG: _bindgen_ty_3 = 4112; +pub const NV_VGPU_MSG_EVENT_DISPLAY_MODESET: _bindgen_ty_3 = 4113; +pub const NV_VGPU_MSG_EVENT_EXTDEV_INTR_SERVICE: _bindgen_ty_3 = 4114; +pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_256: _bindgen_ty_3 = 4115; +pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_512: _bindgen_ty_3 = 4116; +pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_1024: _bindgen_ty_3 = 4117; +pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_2048: _bindgen_ty_3 = 4118; +pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_4096: _bindgen_ty_3 = 4119; +pub const NV_VGPU_MSG_EVENT_TIMED_SEMAPHORE_RELEASE: _bindgen_ty_3 = 4120; +pub const NV_VGPU_MSG_EVENT_NVLINK_IS_GPU_DEGRADED: _bindgen_ty_3 = 4121; +pub const NV_VGPU_MSG_EVENT_PFM_REQ_HNDLR_STATE_SYNC_CALLBACK: _bindgen_ty_3 = 4122; +pub const NV_VGPU_MSG_EVENT_NVLINK_FAULT_UP: _bindgen_ty_3 = 4123; +pub const NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE: _bindgen_ty_3 = 4124; +pub const NV_VGPU_MSG_EVENT_MIG_CI_CONFIG_UPDATE: _bindgen_ty_3 = 4125; +pub const NV_VGPU_MSG_EVENT_UPDATE_GSP_TRACE: _bindgen_ty_3 = 4126; +pub const NV_VGPU_MSG_EVENT_NVLINK_FATAL_ERROR_RECOVERY: _bindgen_ty_3 = 4127; +pub const NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD: _bindgen_ty_3 = 4128; +pub const NV_VGPU_MSG_EVENT_FECS_ERROR: _bindgen_ty_3 = 4129; +pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130; +pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131; +pub type _bindgen_ty_3 = ffi::c_uint; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS { + pub totalVFs: u32_, + pub firstVfOffset: u32_, + pub vfFeatureMask: u32_, + pub FirstVFBar0Address: u64_, + pub FirstVFBar1Address: u64_, + pub FirstVFBar2Address: u64_, + pub bar0Size: u64_, + pub 
bar1Size: u64_, + pub bar2Size: u64_, + pub b64bitBar0: u8_, + pub b64bitBar1: u8_, + pub b64bitBar2: u8_, + pub bSriovEnabled: u8_, + pub bSriovHeavyEnabled: u8_, + pub bEmulateVFBar0TlbInvalidationRegister: u8_, + pub bClientRmAllocatedCtxBuffer: u8_, + pub bNonPowerOf2ChannelCountSupported: u8_, + pub bVfResizableBAR1Supported: u8_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS { + pub BoardID: u32_, + pub chipSKU: [ffi::c_char; 9usize], + pub chipSKUMod: [ffi::c_char; 5usize], + pub skuConfigVersion: u32_, + pub project: [ffi::c_char; 5usize], + pub projectSKU: [ffi::c_char; 5usize], + pub CDP: [ffi::c_char; 6usize], + pub projectSKUMod: [ffi::c_char; 2usize], + pub businessCycle: u32_, +} +pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize]; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO { + pub base: u64_, + pub limit: u64_, + pub reserved: u64_, + pub performance: u32_, + pub supportCompressed: u8_, + pub supportISO: u8_, + pub bProtected: u8_, + pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS { + pub numFBRegions: u32_, + pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + pub index: u32_, + pub flags: u32_, + pub length: u32_, + pub data: [u8_; 256usize], +} +impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct DOD_METHOD_DATA { + pub status: u32_, + pub acpiIdListLen: u32_, + pub acpiIdList: [u32_; 16usize], +} 
+#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct JT_METHOD_DATA { + pub status: u32_, + pub jtCaps: u32_, + pub jtRevId: u16_, + pub bSBIOSCaps: u8_, + pub __bindgen_padding_0: u8, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct MUX_METHOD_DATA_ELEMENT { + pub acpiId: u32_, + pub mode: u32_, + pub status: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct MUX_METHOD_DATA { + pub tableLen: u32_, + pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize], + pub acpiIdMuxPartTable: [MUX_METHOD_DATA_ELEMENT; 16usize], + pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize], +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct CAPS_METHOD_DATA { + pub status: u32_, + pub optimusCaps: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct ACPI_METHOD_DATA { + pub bValid: u8_, + pub __bindgen_padding_0: [u8; 3usize], + pub dodMethodData: DOD_METHOD_DATA, + pub jtMethodData: JT_METHOD_DATA, + pub muxMethodData: MUX_METHOD_DATA, + pub capsMethodData: CAPS_METHOD_DATA, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS { + pub headIndex: u32_, + pub maxHResolution: u32_, + pub maxVResolution: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS { + pub numHeads: u32_, + pub maxNumHeads: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct BUSINFO { + pub deviceID: u16_, + pub vendorID: u16_, + pub subdeviceID: u16_, + pub subvendorID: u16_, + pub revisionID: u8_, + pub __bindgen_padding_0: u8, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GSP_VF_INFO { + pub totalVFs: u32_, + pub firstVFOffset: u32_, + pub FirstVFBar0Address: u64_, + pub FirstVFBar1Address: u64_, + pub FirstVFBar2Address: u64_, + pub b64bitBar0: u8_, + pub b64bitBar1: u8_, + pub b64bitBar2: 
u8_, + pub __bindgen_padding_0: [u8; 5usize], +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GSP_PCIE_CONFIG_REG { + pub linkCap: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct EcidManufacturingInfo { + pub ecidLow: u32_, + pub ecidHigh: u32_, + pub ecidExtended: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct FW_WPR_LAYOUT_OFFSET { + pub nonWprHeapOffset: u64_, + pub frtsOffset: u64_, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct GspStaticConfigInfo_t { + pub grCapsBits: [u8_; 23usize], + pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS, + pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS, + pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS, + pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS, + pub sriovMaxGfid: u32_, + pub engineCaps: [u32_; 3usize], + pub poisonFuseEnabled: u8_, + pub fb_length: u64_, + pub fbio_mask: u64_, + pub fb_bus_width: u32_, + pub fb_ram_type: u32_, + pub fbp_mask: u64_, + pub l2_cache_size: u32_, + pub gpuNameString: [u8_; 64usize], + pub gpuShortNameString: [u8_; 64usize], + pub gpuNameString_Unicode: [u16_; 64usize], + pub bGpuInternalSku: u8_, + pub bIsQuadroGeneric: u8_, + pub bIsQuadroAd: u8_, + pub bIsNvidiaNvs: u8_, + pub bIsVgx: u8_, + pub bGeforceSmb: u8_, + pub bIsTitan: u8_, + pub bIsTesla: u8_, + pub bIsMobile: u8_, + pub bIsGc6Rtd3Allowed: u8_, + pub bIsGc8Rtd3Allowed: u8_, + pub bIsGcOffRtd3Allowed: u8_, + pub bIsGcoffLegacyAllowed: u8_, + pub bIsMigSupported: u8_, + pub RTD3GC6TotalBoardPower: u16_, + pub RTD3GC6PerstDelay: u16_, + pub bar1PdeBase: u64_, + pub bar2PdeBase: u64_, + pub bVbiosValid: u8_, + pub vbiosSubVendor: u32_, + pub vbiosSubDevice: u32_, + pub bPageRetirementSupported: u8_, + pub bSplitVasBetweenServerClientRm: u8_, + pub bClRootportNeedsNosnoopWAR: u8_, + pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS, + pub displaylessMaxResolution: 
VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS, + pub displaylessMaxPixels: u64_, + pub hInternalClient: u32_, + pub hInternalDevice: u32_, + pub hInternalSubdevice: u32_, + pub bSelfHostedMode: u8_, + pub bAtsSupported: u8_, + pub bIsGpuUefi: u8_, + pub bIsEfiInit: u8_, + pub ecidInfo: [EcidManufacturingInfo; 2usize], + pub fwWprLayoutOffset: FW_WPR_LAYOUT_OFFSET, +} +impl Default for GspStaticConfigInfo_t { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GspSystemInfo { + pub gpuPhysAddr: u64_, + pub gpuPhysFbAddr: u64_, + pub gpuPhysInstAddr: u64_, + pub gpuPhysIoAddr: u64_, + pub nvDomainBusDeviceFunc: u64_, + pub simAccessBufPhysAddr: u64_, + pub notifyOpSharedSurfacePhysAddr: u64_, + pub pcieAtomicsOpMask: u64_, + pub consoleMemSize: u64_, + pub maxUserVa: u64_, + pub pciConfigMirrorBase: u32_, + pub pciConfigMirrorSize: u32_, + pub PCIDeviceID: u32_, + pub PCISubDeviceID: u32_, + pub PCIRevisionID: u32_, + pub pcieAtomicsCplDeviceCapMask: u32_, + pub oorArch: u8_, + pub __bindgen_padding_0: [u8; 7usize], + pub clPdbProperties: u64_, + pub Chipset: u32_, + pub bGpuBehindBridge: u8_, + pub bFlrSupported: u8_, + pub b64bBar0Supported: u8_, + pub bMnocAvailable: u8_, + pub chipsetL1ssEnable: u32_, + pub bUpstreamL0sUnsupported: u8_, + pub bUpstreamL1Unsupported: u8_, + pub bUpstreamL1PorSupported: u8_, + pub bUpstreamL1PorMobileOnly: u8_, + pub bSystemHasMux: u8_, + pub upstreamAddressValid: u8_, + pub FHBBusInfo: BUSINFO, + pub chipsetIDInfo: BUSINFO, + pub __bindgen_padding_1: [u8; 2usize], + pub acpiMethodData: ACPI_METHOD_DATA, + pub hypervisorType: u32_, + pub bIsPassthru: u8_, + pub __bindgen_padding_2: [u8; 7usize], + pub sysTimerOffsetNs: u64_, + pub gspVFInfo: GSP_VF_INFO, + pub bIsPrimary: u8_, + pub isGridBuild: u8_, + pub __bindgen_padding_3: [u8; 2usize], + 
pub pcieConfigReg: GSP_PCIE_CONFIG_REG, + pub gridBuildCsp: u32_, + pub bPreserveVideoMemoryAllocations: u8_, + pub bTdrEventSupported: u8_, + pub bFeatureStretchVblankCapable: u8_, + pub bEnableDynamicGranularityPageArrays: u8_, + pub bClockBoostSupported: u8_, + pub bRouteDispIntrsToCPU: u8_, + pub __bindgen_padding_4: [u8; 6usize], + pub hostPageSize: u64_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct MESSAGE_QUEUE_INIT_ARGUMENTS { + pub sharedMemPhysAddr: u64_, + pub pageTableEntryCount: u32_, + pub __bindgen_padding_0: [u8; 4usize], + pub cmdQueueOffset: u64_, + pub statQueueOffset: u64_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GSP_SR_INIT_ARGUMENTS { + pub oldLevel: u32_, + pub flags: u32_, + pub bInPMTransition: u8_, + pub __bindgen_padding_0: [u8; 3usize], +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GSP_ARGUMENTS_CACHED { + pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS, + pub srInitArguments: GSP_SR_INIT_ARGUMENTS, + pub gpuInstance: u32_, + pub bDmemStack: u8_, + pub __bindgen_padding_0: [u8; 7usize], + pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 { + pub pa: u64_, + pub size: u64_, +} +#[repr(C)] +#[derive(Copy, Clone, Zeroable)] +pub union rpc_message_rpc_union_field_v03_00 { + pub spare: u32_, + pub cpuRmGfid: u32_, +} +impl Default for rpc_message_rpc_union_field_v03_00 { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00; +#[repr(C)] +pub struct rpc_message_header_v03_00 { + pub header_version: u32_, + pub signature: u32_, + pub length: u32_, + pub function: u32_, + pub rpc_result: u32_, + pub rpc_result_private: u32_, + 
pub sequence: u32_, + pub u: rpc_message_rpc_union_field_v, + pub rpc_message_data: __IncompleteArrayField<u8_>, +} +impl Default for rpc_message_header_v03_00 { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type rpc_message_header_v = rpc_message_header_v03_00; +#[repr(C)] +#[derive(Copy, Clone, Zeroable)] +pub struct GspFwWprMeta { + pub magic: u64_, + pub revision: u64_, + pub sysmemAddrOfRadix3Elf: u64_, + pub sizeOfRadix3Elf: u64_, + pub sysmemAddrOfBootloader: u64_, + pub sizeOfBootloader: u64_, + pub bootloaderCodeOffset: u64_, + pub bootloaderDataOffset: u64_, + pub bootloaderManifestOffset: u64_, + pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1, + pub gspFwRsvdStart: u64_, + pub nonWprHeapOffset: u64_, + pub nonWprHeapSize: u64_, + pub gspFwWprStart: u64_, + pub gspFwHeapOffset: u64_, + pub gspFwHeapSize: u64_, + pub gspFwOffset: u64_, + pub bootBinOffset: u64_, + pub frtsOffset: u64_, + pub frtsSize: u64_, + pub gspFwWprEnd: u64_, + pub fbSize: u64_, + pub vgaWorkspaceOffset: u64_, + pub vgaWorkspaceSize: u64_, + pub bootCount: u64_, + pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2, + pub gspFwHeapVfPartitionCount: u8_, + pub flags: u8_, + pub padding: [u8_; 2usize], + pub pmuReservedSize: u32_, + pub verified: u64_, +} +#[repr(C)] +#[derive(Copy, Clone, Zeroable)] +pub union GspFwWprMeta__bindgen_ty_1 { + pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1, + pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 { + pub sysmemAddrOfSignature: u64_, + pub sizeOfSignature: u64_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 { + pub gspFwHeapFreeListWprOffset: u32_, + pub unused0: u32_, + pub unused1: u64_, +} +impl 
Default for GspFwWprMeta__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Copy, Clone, Zeroable)] +pub union GspFwWprMeta__bindgen_ty_2 { + pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1, + pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 { + pub partitionRpcAddr: u64_, + pub partitionRpcRequestOffset: u16_, + pub partitionRpcReplyOffset: u16_, + pub elfCodeOffset: u32_, + pub elfDataOffset: u32_, + pub elfCodeSize: u32_, + pub elfDataSize: u32_, + pub lsUcodeVersion: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 { + pub partitionRpcPadding: [u32_; 4usize], + pub sysmemAddrOfCrashReportQueue: u64_, + pub sizeOfCrashReportQueue: u32_, + pub lsUcodeVersionPadding: [u32_; 1usize], +} +impl Default for GspFwWprMeta__bindgen_ty_2 { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl Default for GspFwWprMeta { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type LibosAddress = u64_; +pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_NONE: LibosMemoryRegionKind = 0; +pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS: LibosMemoryRegionKind = 1; +pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_RADIX3: LibosMemoryRegionKind = 2; +pub type LibosMemoryRegionKind = ffi::c_uint; +pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_NONE: LibosMemoryRegionLoc = 0; +pub const 
LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegionLoc = 1; +pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2; +pub type LibosMemoryRegionLoc = ffi::c_uint; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct LibosMemoryRegionInitArgument { + pub id8: LibosAddress, + pub pa: LibosAddress, + pub size: LibosAddress, + pub kind: u8_, + pub loc: u8_, + pub __bindgen_padding_0: [u8; 6usize], +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct PACKED_REGISTRY_ENTRY { + pub nameOffset: u32_, + pub type_: u8_, + pub __bindgen_padding_0: [u8; 3usize], + pub data: u32_, + pub length: u32_, +} +#[repr(C)] +#[derive(Debug, Default)] +pub struct PACKED_REGISTRY_TABLE { + pub size: u32_, + pub numEntries: u32_, + pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct msgqTxHeader { + pub version: u32_, + pub size: u32_, + pub msgSize: u32_, + pub msgCount: u32_, + pub writePtr: u32_, + pub flags: u32_, + pub rxHdrOff: u32_, + pub entryOff: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone, Zeroable)] +pub struct msgqRxHeader { + pub readPtr: u32_, +} +#[repr(C)] +#[repr(align(8))] +#[derive(Zeroable)] +pub struct GSP_MSG_QUEUE_ELEMENT { + pub authTagBuffer: [u8_; 16usize], + pub aadBuffer: [u8_; 16usize], + pub checkSum: u32_, + pub seqNum: u32_, + pub elemCount: u32_, + pub __bindgen_padding_0: [u8; 4usize], + pub rpc: rpc_message_header_v, +} +impl Default for GSP_MSG_QUEUE_ELEMENT { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Default)] +pub struct rpc_run_cpu_sequencer_v17_00 { + pub bufferSizeDWord: u32_, + pub cmdIndex: u32_, + pub regSaveArea: [u32_; 8usize], + pub commandBuffer: __IncompleteArrayField<u32_>, +} +pub const 
GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE: GSP_SEQ_BUF_OPCODE = 0; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY: GSP_SEQ_BUF_OPCODE = 1; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL: GSP_SEQ_BUF_OPCODE = 2; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US: GSP_SEQ_BUF_OPCODE = 3; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE: GSP_SEQ_BUF_OPCODE = 4; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET: GSP_SEQ_BUF_OPCODE = 5; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START: GSP_SEQ_BUF_OPCODE = 6; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_OPCODE = 7; +pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8; +pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE { + pub addr: u32_, + pub val: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY { + pub addr: u32_, + pub mask: u32_, + pub val: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL { + pub addr: u32_, + pub mask: u32_, + pub val: u32_, + pub timeout: u32_, + pub error: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US { + pub val: u32_, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE { + pub addr: u32_, + pub index: u32_, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct GSP_SEQUENCER_BUFFER_CMD { + pub opCode: GSP_SEQ_BUF_OPCODE, + pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 { + pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE, + pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY, + pub regPoll: GSP_SEQ_BUF_PAYLOAD_REG_POLL, + pub delayUs: GSP_SEQ_BUF_PAYLOAD_DELAY_US, + pub regStore: 
GSP_SEQ_BUF_PAYLOAD_REG_STORE, +} +impl Default for GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl Default for GSP_SEQUENCER_BUFFER_CMD { + fn default() -> Self { + let mut s = ::core::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} diff --git a/drivers/gpu/nova-core/gsp/sequencer.rs b/drivers/gpu/nova-core/gsp/sequencer.rs new file mode 100644 index 000000000000..2d0369c49092 --- /dev/null +++ b/drivers/gpu/nova-core/gsp/sequencer.rs @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! GSP Sequencer implementation for Pre-hopper GSP boot sequence. + +use core::{ + array, + mem::{ + size_of, + size_of_val, // + }, +}; + +use kernel::{ + device, + io::poll::read_poll_timeout, + prelude::*, + time::{ + delay::fsleep, + Delta, // + }, + transmute::FromBytes, + types::ARef, // +}; + +use crate::{ + driver::Bar0, + falcon::{ + gsp::Gsp, + sec2::Sec2, + Falcon, // + }, + gsp::{ + cmdq::{ + Cmdq, + MessageFromGsp, // + }, + fw, + }, + num::FromSafeCast, + sbuffer::SBufferIter, +}; + +/// GSP Sequencer information containing the command sequence and data. +struct GspSequence { + /// Current command index for error reporting. + cmd_index: u32, + /// Command data buffer containing the sequence of commands. 
+ cmd_data: KVec<u8>, +} + +impl MessageFromGsp for GspSequence { + const FUNCTION: fw::MsgFunction = fw::MsgFunction::GspRunCpuSequencer; + type InitError = Error; + type Message = fw::RunCpuSequencer; + + fn read( + msg: &Self::Message, + sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>, + ) -> Result<Self, Self::InitError> { + let cmd_data = sbuffer.flush_into_kvec(GFP_KERNEL)?; + Ok(GspSequence { + cmd_index: msg.cmd_index(), + cmd_data, + }) + } +} + +const CMD_SIZE: usize = size_of::<fw::SequencerBufferCmd>(); + +/// GSP Sequencer Command types with payload data. +/// Commands have an opcode and an opcode-dependent struct. +#[allow(clippy::enum_variant_names)] +pub(crate) enum GspSeqCmd { + RegWrite(fw::RegWritePayload), + RegModify(fw::RegModifyPayload), + RegPoll(fw::RegPollPayload), + DelayUs(fw::DelayUsPayload), + RegStore(fw::RegStorePayload), + CoreReset, + CoreStart, + CoreWaitForHalt, + CoreResume, +} + +impl GspSeqCmd { + /// Creates a new `GspSeqCmd` from raw data returning the command and its size in bytes. + pub(crate) fn new(data: &[u8], dev: &device::Device) -> Result<(Self, usize)> { + let fw_cmd = fw::SequencerBufferCmd::from_bytes(data).ok_or(EINVAL)?; + let opcode_size = core::mem::size_of::<u32>(); + + let (cmd, size) = match fw_cmd.opcode()? 
{ + fw::SeqBufOpcode::RegWrite => { + let payload = fw_cmd.reg_write_payload()?; + let size = opcode_size + size_of_val(&payload); + (GspSeqCmd::RegWrite(payload), size) + } + fw::SeqBufOpcode::RegModify => { + let payload = fw_cmd.reg_modify_payload()?; + let size = opcode_size + size_of_val(&payload); + (GspSeqCmd::RegModify(payload), size) + } + fw::SeqBufOpcode::RegPoll => { + let payload = fw_cmd.reg_poll_payload()?; + let size = opcode_size + size_of_val(&payload); + (GspSeqCmd::RegPoll(payload), size) + } + fw::SeqBufOpcode::DelayUs => { + let payload = fw_cmd.delay_us_payload()?; + let size = opcode_size + size_of_val(&payload); + (GspSeqCmd::DelayUs(payload), size) + } + fw::SeqBufOpcode::RegStore => { + let payload = fw_cmd.reg_store_payload()?; + let size = opcode_size + size_of_val(&payload); + (GspSeqCmd::RegStore(payload), size) + } + fw::SeqBufOpcode::CoreReset => (GspSeqCmd::CoreReset, opcode_size), + fw::SeqBufOpcode::CoreStart => (GspSeqCmd::CoreStart, opcode_size), + fw::SeqBufOpcode::CoreWaitForHalt => (GspSeqCmd::CoreWaitForHalt, opcode_size), + fw::SeqBufOpcode::CoreResume => (GspSeqCmd::CoreResume, opcode_size), + }; + + if data.len() < size { + dev_err!(dev, "Data is not enough for command"); + return Err(EINVAL); + } + + Ok((cmd, size)) + } +} + +/// GSP Sequencer for executing firmware commands during boot. +pub(crate) struct GspSequencer<'a> { + /// Sequencer information with command data. + seq_info: GspSequence, + /// `Bar0` for register access. + bar: &'a Bar0, + /// SEC2 falcon for core operations. + sec2_falcon: &'a Falcon<Sec2>, + /// GSP falcon for core operations. + gsp_falcon: &'a Falcon<Gsp>, + /// LibOS DMA handle address. + libos_dma_handle: u64, + /// Bootloader application version. + bootloader_app_version: u32, + /// Device for logging. + dev: ARef<device::Device>, +} + +/// Trait for running sequencer commands. 
+pub(crate) trait GspSeqCmdRunner { + fn run(&self, sequencer: &GspSequencer<'_>) -> Result; +} + +impl GspSeqCmdRunner for fw::RegWritePayload { + fn run(&self, sequencer: &GspSequencer<'_>) -> Result { + let addr = usize::from_safe_cast(self.addr()); + + sequencer.bar.try_write32(self.val(), addr) + } +} + +impl GspSeqCmdRunner for fw::RegModifyPayload { + fn run(&self, sequencer: &GspSequencer<'_>) -> Result { + let addr = usize::from_safe_cast(self.addr()); + + sequencer.bar.try_read32(addr).and_then(|val| { + sequencer + .bar + .try_write32((val & !self.mask()) | self.val(), addr) + }) + } +} + +impl GspSeqCmdRunner for fw::RegPollPayload { + fn run(&self, sequencer: &GspSequencer<'_>) -> Result { + let addr = usize::from_safe_cast(self.addr()); + + // Default timeout to 4 seconds. + let timeout_us = if self.timeout() == 0 { + 4_000_000 + } else { + i64::from(self.timeout()) + }; + + // First read. + sequencer.bar.try_read32(addr)?; + + // Poll the requested register with requested timeout. 
+ read_poll_timeout( + || sequencer.bar.try_read32(addr), + |current| (current & self.mask()) == self.val(), + Delta::ZERO, + Delta::from_micros(timeout_us), + ) + .map(|_| ()) + } +} + +impl GspSeqCmdRunner for fw::DelayUsPayload { + fn run(&self, _sequencer: &GspSequencer<'_>) -> Result { + fsleep(Delta::from_micros(i64::from(self.val()))); + Ok(()) + } +} + +impl GspSeqCmdRunner for fw::RegStorePayload { + fn run(&self, sequencer: &GspSequencer<'_>) -> Result { + let addr = usize::from_safe_cast(self.addr()); + + sequencer.bar.try_read32(addr).map(|_| ()) + } +} + +impl GspSeqCmdRunner for GspSeqCmd { + fn run(&self, seq: &GspSequencer<'_>) -> Result { + match self { + GspSeqCmd::RegWrite(cmd) => cmd.run(seq), + GspSeqCmd::RegModify(cmd) => cmd.run(seq), + GspSeqCmd::RegPoll(cmd) => cmd.run(seq), + GspSeqCmd::DelayUs(cmd) => cmd.run(seq), + GspSeqCmd::RegStore(cmd) => cmd.run(seq), + GspSeqCmd::CoreReset => { + seq.gsp_falcon.reset(seq.bar)?; + seq.gsp_falcon.dma_reset(seq.bar); + Ok(()) + } + GspSeqCmd::CoreStart => { + seq.gsp_falcon.start(seq.bar)?; + Ok(()) + } + GspSeqCmd::CoreWaitForHalt => { + seq.gsp_falcon.wait_till_halted(seq.bar)?; + Ok(()) + } + GspSeqCmd::CoreResume => { + // At this point, 'SEC2-RTOS' has been loaded into SEC2 by the sequencer + // but neither SEC2-RTOS nor GSP-RM is running yet. This part of the + // sequencer will start both. + + // Reset the GSP to prepare it for resuming. + seq.gsp_falcon.reset(seq.bar)?; + + // Write the libOS DMA handle to GSP mailboxes. + seq.gsp_falcon.write_mailboxes( + seq.bar, + Some(seq.libos_dma_handle as u32), + Some((seq.libos_dma_handle >> 32) as u32), + ); + + // Start the SEC2 falcon which will trigger GSP-RM to resume on the GSP. + seq.sec2_falcon.start(seq.bar)?; + + // Poll until GSP-RM reload/resume has completed (up to 2 seconds). + seq.gsp_falcon + .check_reload_completed(seq.bar, Delta::from_secs(2))?; + + // Verify SEC2 completed successfully by checking its mailbox for errors. 
+ let mbox0 = seq.sec2_falcon.read_mailbox0(seq.bar); + if mbox0 != 0 { + dev_err!(seq.dev, "Sequencer: sec2 errors: {:?}\n", mbox0); + return Err(EIO); + } + + // Configure GSP with the bootloader version. + seq.gsp_falcon + .write_os_version(seq.bar, seq.bootloader_app_version); + + // Verify the GSP's RISC-V core is active indicating successful GSP boot. + if !seq.gsp_falcon.is_riscv_active(seq.bar) { + dev_err!(seq.dev, "Sequencer: RISC-V core is not active\n"); + return Err(EIO); + } + Ok(()) + } + } + } +} + +/// Iterator over GSP sequencer commands. +pub(crate) struct GspSeqIter<'a> { + /// Command data buffer. + cmd_data: &'a [u8], + /// Current position in the buffer. + current_offset: usize, + /// Total number of commands to process. + total_cmds: u32, + /// Number of commands processed so far. + cmds_processed: u32, + /// Device for logging. + dev: ARef<device::Device>, +} + +impl<'a> Iterator for GspSeqIter<'a> { + type Item = Result<GspSeqCmd>; + + fn next(&mut self) -> Option<Self::Item> { + // Stop if we've processed all commands or reached the end of data. + if self.cmds_processed >= self.total_cmds || self.current_offset >= self.cmd_data.len() { + return None; + } + + // Check if we have enough data for opcode. + if self.current_offset + core::mem::size_of::<u32>() > self.cmd_data.len() { + return Some(Err(EIO)); + } + + let offset = self.current_offset; + + // Handle command creation based on available data, + // zero-pad if necessary (since last command may not be full size). 
+ let mut buffer = [0u8; CMD_SIZE]; + let copy_len = if offset + CMD_SIZE <= self.cmd_data.len() { + CMD_SIZE + } else { + self.cmd_data.len() - offset + }; + buffer[..copy_len].copy_from_slice(&self.cmd_data[offset..offset + copy_len]); + let cmd_result = GspSeqCmd::new(&buffer, &self.dev); + + cmd_result.map_or_else( + |_err| { + dev_err!(self.dev, "Error parsing command at offset {}", offset); + None + }, + |(cmd, size)| { + self.current_offset += size; + self.cmds_processed += 1; + Some(Ok(cmd)) + }, + ) + } +} + +impl<'a> GspSequencer<'a> { + fn iter(&self) -> GspSeqIter<'_> { + let cmd_data = &self.seq_info.cmd_data[..]; + + GspSeqIter { + cmd_data, + current_offset: 0, + total_cmds: self.seq_info.cmd_index, + cmds_processed: 0, + dev: self.dev.clone(), + } + } +} + +/// Parameters for running the GSP sequencer. +pub(crate) struct GspSequencerParams<'a> { + /// Bootloader application version. + pub(crate) bootloader_app_version: u32, + /// LibOS DMA handle address. + pub(crate) libos_dma_handle: u64, + /// GSP falcon for core operations. + pub(crate) gsp_falcon: &'a Falcon<Gsp>, + /// SEC2 falcon for core operations. + pub(crate) sec2_falcon: &'a Falcon<Sec2>, + /// Device for logging. + pub(crate) dev: ARef<device::Device>, + /// BAR0 for register access. 
+ pub(crate) bar: &'a Bar0, +} + +impl<'a> GspSequencer<'a> { + pub(crate) fn run(cmdq: &mut Cmdq, params: GspSequencerParams<'a>) -> Result { + let seq_info = loop { + match cmdq.receive_msg::<GspSequence>(Delta::from_secs(10)) { + Ok(seq_info) => break seq_info, + Err(ERANGE) => continue, + Err(e) => return Err(e), + } + }; + + let sequencer = GspSequencer { + seq_info, + bar: params.bar, + sec2_falcon: params.sec2_falcon, + gsp_falcon: params.gsp_falcon, + libos_dma_handle: params.libos_dma_handle, + bootloader_app_version: params.bootloader_app_version, + dev: params.dev, + }; + + dev_dbg!(sequencer.dev, "Running CPU Sequencer commands"); + + for cmd_result in sequencer.iter() { + match cmd_result { + Ok(cmd) => cmd.run(&sequencer)?, + Err(e) => { + dev_err!( + sequencer.dev, + "Error running command at index {}", + sequencer.seq_info.cmd_index + ); + return Err(e); + } + } + } + + dev_dbg!( + sequencer.dev, + "CPU Sequencer commands completed successfully" + ); + Ok(()) + } +} |
