Diffstat (limited to 'include/linux/hmm.h')
-rw-r--r--   include/linux/hmm.h   59
1 file changed, 47 insertions, 12 deletions
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index f4a09ed223ac..db75ffc949a7 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -4,19 +4,14 @@
  *
  * Authors: Jérôme Glisse <jglisse@redhat.com>
  *
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
  */
 #ifndef LINUX_HMM_H
 #define LINUX_HMM_H
 
-#include <linux/kconfig.h>
-#include <linux/pgtable.h>
+#include <linux/mm.h>
 
-#include <linux/device.h>
-#include <linux/migrate.h>
-#include <linux/memremap.h>
-#include <linux/completion.h>
-#include <linux/mmu_notifier.h>
+struct mmu_interval_notifier;
 
 /*
  * On output:
@@ -28,6 +23,10 @@
  * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
  * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
  *                 fail. ie poisoned memory, special pages, no vma, etc
+ * HMM_PFN_P2PDMA - P2P page
+ * HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
+ * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
+ *                      to mark that page is already DMA mapped
  *
  * On input:
  * 0                 - Return the current state of the page, do not fault it.
@@ -37,16 +36,25 @@
  *                     will fail. Must be combined with HMM_PFN_REQ_FAULT.
  */
 enum hmm_pfn_flags {
-	/* Output flags */
+	/* Output fields and flags */
 	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
 	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
 	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+	/*
+	 * Sticky flags, carried from input to output,
+	 * don't forget to update HMM_PFN_INOUT_FLAGS
+	 */
+	HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 4),
+	HMM_PFN_P2PDMA = 1UL << (BITS_PER_LONG - 5),
+	HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),
+
+	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 11),
 
 	/* Input flags */
 	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
 	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
 
-	HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
+	HMM_PFN_FLAGS = ~((1UL << HMM_PFN_ORDER_SHIFT) - 1),
 };
 
 /*
@@ -62,6 +70,33 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
 }
 
 /*
+ * hmm_pfn_to_phys() - return physical address pointed to by a device entry
+ */
+static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
+{
+	return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within an high order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
+/*
  * struct hmm_range - track invalidation lock on virtual address range
  *
  * @notifier: a mmu_interval_notifier that includes the start/end
@@ -85,7 +120,7 @@ struct hmm_range {
 };
 
 /*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
  */
 int hmm_range_fault(struct hmm_range *range);
 
@@ -93,7 +128,7 @@ int hmm_range_fault(struct hmm_range *range);
  * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
  *
  * When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait for ever, 1000ms ie 1s sounds like a long time to
  * wait already.
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
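
For context, the helpers added above operate on the hmm_pfns[] array that hmm_range_fault() fills in. The loop below is a minimal sketch of a caller consuming that array; it is not part of the patch. dev_map_page() is a hypothetical driver hook, and the sketch assumes range->start is aligned so that stepping by 1 << hmm_pfn_to_map_order() stays consistent (see the edge-case note in the header comment above).

#include <linux/hmm.h>
#include <linux/mm.h>

/* Hypothetical driver hook, not a kernel API. */
extern void dev_map_page(phys_addr_t paddr, bool writable, bool dma_mapped,
			 unsigned int order);

/*
 * Sketch only: walk the output of hmm_range_fault(), using the new
 * hmm_pfn_to_phys() helper and hmm_pfn_to_map_order() to step over
 * high-order mappings in one go.
 */
static int dev_update_range(struct hmm_range *range)
{
	unsigned long npfns = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i = 0;

	while (i < npfns) {
		unsigned long hmm_pfn = range->hmm_pfns[i];
		unsigned int order;

		if (!(hmm_pfn & HMM_PFN_VALID))
			return -EFAULT;

		/* Every pfn within a high-order page carries the same flags and order. */
		order = hmm_pfn_to_map_order(hmm_pfn);

		dev_map_page(hmm_pfn_to_phys(hmm_pfn),		/* CPU physical address */
			     !!(hmm_pfn & HMM_PFN_WRITE),	/* writable mapping? */
			     !!(hmm_pfn & HMM_PFN_DMA_MAPPED),	/* already DMA mapped? */
			     order);

		i += min(1UL << order, npfns - i);
	}
	return 0;
}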

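HMM_RANGE_DEFAULT_TIMEOUT pairs with the retry loop described in Documentation/mm/hmm.rst: hmm_range_fault() returns -EBUSY when it races with an invalidation and must be retried under a fresh notifier sequence. A minimal sketch of that loop follows; the driver mutex and the device page-table update are hypothetical driver-side details.

#include <linux/hmm.h>
#include <linux/jiffies.h>
#include <linux/mmap_lock.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/*
 * Sketch only: the usual hmm_range_fault() retry loop. 'driver_lock' is a
 * hypothetical per-device mutex that the driver's invalidate() callback also
 * takes, so the pfn array stays valid while the device page table is updated.
 */
static int driver_populate_range(struct mmu_interval_notifier *notifier,
				 struct hmm_range *range,
				 struct mutex *driver_lock)
{
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	struct mm_struct *mm = notifier->mm;
	int ret;

	range->notifier = notifier;
again:
	if (time_after(jiffies, timeout))
		return -EBUSY;

	range->notifier_seq = mmu_interval_read_begin(notifier);
	mmap_read_lock(mm);
	ret = hmm_range_fault(range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)	/* collided with an invalidation, retry */
			goto again;
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(notifier, range->notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;
	}

	/* range->hmm_pfns[] is stable here; update the device page table. */

	mutex_unlock(driver_lock);
	return 0;
}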