Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--	fs/hugetlbfs/inode.c	| 1675
1 file changed, 1127 insertions(+), 548 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3f868ae3fd4..3b4c152c5c73 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -4,12 +4,14 @@ * Nadia Yvette Chambers, 2002 * * Copyright (C) 2002 Linus Torvalds. + * License: GPL */ -#include <linux/module.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/thread_info.h> #include <asm/current.h> -#include <linux/sched.h> /* remove ASAP */ +#include <linux/falloc.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/file.h> @@ -24,7 +26,7 @@ #include <linux/backing-dev.h> #include <linux/hugetlb.h> #include <linux/pagevec.h> -#include <linux/parser.h> +#include <linux/fs_parser.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/dnotify.h> @@ -32,369 +34,608 @@ #include <linux/security.h> #include <linux/magic.h> #include <linux/migrate.h> +#include <linux/uio.h> + +#include <linux/uaccess.h> +#include <linux/sched/mm.h> -#include <asm/uaccess.h> +#define CREATE_TRACE_POINTS +#include <trace/events/hugetlbfs.h> -static const struct super_operations hugetlbfs_ops; static const struct address_space_operations hugetlbfs_aops; -const struct file_operations hugetlbfs_file_operations; +static const struct file_operations hugetlbfs_file_operations; static const struct inode_operations hugetlbfs_dir_inode_operations; static const struct inode_operations hugetlbfs_inode_operations; -struct hugetlbfs_config { - kuid_t uid; - kgid_t gid; - umode_t mode; - long nr_blocks; - long nr_inodes; - struct hstate *hstate; -}; - -struct hugetlbfs_inode_info { - struct shared_policy policy; - struct inode vfs_inode; -}; - -static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) -{ - return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); -} - -static struct backing_dev_info hugetlbfs_backing_dev_info = { - .name = "hugetlbfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, +enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT }; + +struct hugetlbfs_fs_context { + struct hstate *hstate; + unsigned long long max_size_opt; + unsigned long long min_size_opt; + long max_hpages; + long nr_inodes; + long min_hpages; + enum hugetlbfs_size_type max_val_type; + enum hugetlbfs_size_type min_val_type; + kuid_t uid; + kgid_t gid; + umode_t mode; }; int sysctl_hugetlb_shm_group; -enum { - Opt_size, Opt_nr_inodes, - Opt_mode, Opt_uid, Opt_gid, +enum hugetlb_param { + Opt_gid, + Opt_min_size, + Opt_mode, + Opt_nr_inodes, Opt_pagesize, - Opt_err, + Opt_size, + Opt_uid, }; -static const match_table_t tokens = { - {Opt_size, "size=%s"}, - {Opt_nr_inodes, "nr_inodes=%s"}, - {Opt_mode, "mode=%o"}, - {Opt_uid, "uid=%u"}, - {Opt_gid, "gid=%u"}, - {Opt_pagesize, "pagesize=%s"}, - {Opt_err, NULL}, +static const struct fs_parameter_spec hugetlb_fs_parameters[] = { + fsparam_gid ("gid", Opt_gid), + fsparam_string("min_size", Opt_min_size), + fsparam_u32oct("mode", Opt_mode), + fsparam_string("nr_inodes", Opt_nr_inodes), + fsparam_string("pagesize", Opt_pagesize), + fsparam_string("size", Opt_size), + fsparam_uid ("uid", Opt_uid), + {} }; -static void huge_pagevec_release(struct pagevec *pvec) -{ - int i; - - for (i = 0; i < pagevec_count(pvec); ++i) - put_page(pvec->pages[i]); +/* + * Mask used when checking the page offset value passed in via system + * calls. This value will be converted to a loff_t which is signed. + * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the + * value. 
The extra bit (- 1 in the shift value) is to take the sign + * bit into account. + */ +#define PGOFF_LOFFT_MAX \ + (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) - pagevec_reinit(pvec); +static int hugetlb_file_mmap_prepare_success(const struct vm_area_struct *vma) +{ + /* Unfortunate we have to reassign vma->vm_private_data. */ + return hugetlb_vma_lock_alloc((struct vm_area_struct *)vma); } -static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int hugetlbfs_file_mmap_prepare(struct vm_area_desc *desc) { + struct file *file = desc->file; struct inode *inode = file_inode(file); loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); + vm_flags_t vm_flags; /* * vma address alignment (but not the pgoff alignment) has * already been checked by prepare_hugepage_range. If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right - * way when do_mmap_pgoff unwinds (may be important on powerpc + * way when do_mmap unwinds (may be important on powerpc * and ia64). */ - vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; - vma->vm_ops = &hugetlb_vm_ops; + desc->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; + desc->vm_ops = &hugetlb_vm_ops; + + /* + * page based offset in vm_pgoff could be sufficiently large to + * overflow a loff_t when converted to byte offset. This can + * only happen on architectures where sizeof(loff_t) == + * sizeof(unsigned long). So, only check in those instances. + */ + if (sizeof(unsigned long) == sizeof(loff_t)) { + if (desc->pgoff & PGOFF_LOFFT_MAX) + return -EINVAL; + } - if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) + /* must be huge page aligned */ + if (desc->pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; - vma_len = (loff_t)(vma->vm_end - vma->vm_start); + vma_len = (loff_t)vma_desc_size(desc); + len = vma_len + ((loff_t)desc->pgoff << PAGE_SHIFT); + /* check for overflow */ + if (len < vma_len) + return -EINVAL; - mutex_lock(&inode->i_mutex); + inode_lock(inode); file_accessed(file); ret = -ENOMEM; - len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + + vm_flags = desc->vm_flags; + /* + * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip + * reserving here. Note: only for SHM hugetlbfs file, the inode + * flag S_PRIVATE is set. + */ + if (inode->i_flags & S_PRIVATE) + vm_flags |= VM_NORESERVE; if (hugetlb_reserve_pages(inode, - vma->vm_pgoff >> huge_page_order(h), - len >> huge_page_shift(h), vma, - vma->vm_flags)) + desc->pgoff >> huge_page_order(h), + len >> huge_page_shift(h), desc, + vm_flags) < 0) goto out; ret = 0; - hugetlb_prefault_arch_hook(vma->vm_mm); - if (vma->vm_flags & VM_WRITE && inode->i_size < len) - inode->i_size = len; + if ((desc->vm_flags & VM_WRITE) && inode->i_size < len) + i_size_write(inode, len); out: - mutex_unlock(&inode->i_mutex); + inode_unlock(inode); + if (!ret) { + /* Allocate the VMA lock after we set it up. */ + desc->action.success_hook = hugetlb_file_mmap_prepare_success; + /* + * We cannot permit the rmap finding this VMA in the time + * between the VMA being inserted into the VMA tree and the + * completion/success hook being invoked. + * + * This is because we establish a per-VMA hugetlb lock which can + * be raced by rmap. + */ + desc->action.hide_from_rmap_until_complete = true; + } return ret; } /* - * Called under down_write(mmap_sem). + * Called under mmap_write_lock(mm). 
*/ -#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA -static unsigned long +unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) + unsigned long len, unsigned long pgoff, + unsigned long flags) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + unsigned long addr0 = 0; struct hstate *h = hstate_file(file); - struct vm_unmapped_area_info info; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > TASK_SIZE) - return -ENOMEM; - - if (flags & MAP_FIXED) { - if (prepare_hugepage_range(file, addr, len)) - return -EINVAL; - return addr; - } - - if (addr) { - addr = ALIGN(addr, huge_page_size(h)); - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) - return addr; - } + if ((flags & MAP_FIXED) && (addr & ~huge_page_mask(h))) + return -EINVAL; + if (addr) + addr0 = ALIGN(addr, huge_page_size(h)); - info.flags = 0; - info.length = len; - info.low_limit = TASK_UNMAPPED_BASE; - info.high_limit = TASK_SIZE; - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - info.align_offset = 0; - return vm_unmapped_area(&info); + return mm_get_unmapped_area_vmflags(file, addr0, len, pgoff, flags, 0); } -#endif -static int -hugetlbfs_read_actor(struct page *page, unsigned long offset, - char __user *buf, unsigned long count, - unsigned long size) +/* + * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset. + * Returns the maximum number of bytes one can read without touching the 1st raw + * HWPOISON page. + */ +static size_t adjust_range_hwpoison(struct folio *folio, size_t offset, + size_t bytes) { - char *kaddr; - unsigned long left, copied = 0; - int i, chunksize; - - if (size > count) - size = count; - - /* Find which 4k chunk and offset with in that chunk */ - i = offset >> PAGE_CACHE_SHIFT; - offset = offset & ~PAGE_CACHE_MASK; - - while (size) { - chunksize = PAGE_CACHE_SIZE; - if (offset) - chunksize -= offset; - if (chunksize > size) - chunksize = size; - kaddr = kmap(&page[i]); - left = __copy_to_user(buf, kaddr + offset, chunksize); - kunmap(&page[i]); - if (left) { - copied += (chunksize - left); + struct page *page = folio_page(folio, offset / PAGE_SIZE); + size_t safe_bytes; + + if (is_raw_hwpoison_page_in_hugepage(page)) + return 0; + /* Safe to read the remaining bytes in this page. */ + safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE); + page++; + + /* Check each remaining page as long as we are not done yet. */ + for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++) + if (is_raw_hwpoison_page_in_hugepage(page)) break; - } - offset = 0; - size -= chunksize; - buf += chunksize; - copied += chunksize; - i++; - } - return copied ? copied : -EFAULT; + + return min(safe_bytes, bytes); } /* * Support for read() - Find the page attached to f_mapping and copy out the - * data. Its *very* similar to do_generic_mapping_read(), we can't use that - * since it has PAGE_CACHE_SIZE assumptions. + * data. This provides functionality similar to filemap_read(). 
*/ -static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, - size_t len, loff_t *ppos) +static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct hstate *h = hstate_file(filp); - struct address_space *mapping = filp->f_mapping; + struct file *file = iocb->ki_filp; + struct hstate *h = hstate_file(file); + struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - unsigned long index = *ppos >> huge_page_shift(h); - unsigned long offset = *ppos & ~huge_page_mask(h); + unsigned long index = iocb->ki_pos >> huge_page_shift(h); + unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; - /* validate length */ - if (len == 0) - goto out; - - for (;;) { - struct page *page; - unsigned long nr, ret; - int ra; + while (iov_iter_count(to)) { + struct folio *folio; + size_t nr, copied, want; /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); isize = i_size_read(inode); if (!isize) - goto out; + break; end_index = (isize - 1) >> huge_page_shift(h); - if (index >= end_index) { - if (index > end_index) - goto out; + if (index > end_index) + break; + if (index == end_index) { nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) - goto out; + break; } nr = nr - offset; - /* Find the page */ - page = find_lock_page(mapping, index); - if (unlikely(page == NULL)) { + /* Find the folio */ + folio = filemap_lock_hugetlb_folio(h, mapping, index); + if (IS_ERR(folio)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ - ret = len < nr ? len : nr; - if (clear_user(buf, ret)) - ra = -EFAULT; - else - ra = 0; + copied = iov_iter_zero(nr, to); } else { - unlock_page(page); + folio_unlock(folio); + + if (!folio_test_hwpoison(folio)) + want = nr; + else { + /* + * Adjust how many bytes safe to read without + * touching the 1st raw HWPOISON page after + * offset. + */ + want = adjust_range_hwpoison(folio, offset, nr); + if (want == 0) { + folio_put(folio); + retval = -EIO; + break; + } + } /* - * We have the page, copy it to user space buffer. + * We have the folio, copy it to user space buffer. 
*/ - ra = hugetlbfs_read_actor(page, offset, buf, len, nr); - ret = ra; - page_cache_release(page); + copied = copy_folio_to_iter(folio, offset, want, to); + folio_put(folio); } - if (ra < 0) { - if (retval == 0) - retval = ra; - goto out; + offset += copied; + retval += copied; + if (copied != nr && iov_iter_count(to)) { + if (!retval) + retval = -EFAULT; + break; } - - offset += ret; - retval += ret; - len -= ret; index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); - - /* short read or no more work */ - if ((ret != nr) || (len == 0)) - break; } -out: - *ppos = ((loff_t)index << huge_page_shift(h)) + offset; + iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; return retval; } -static int hugetlbfs_write_begin(struct file *file, +static int hugetlbfs_write_begin(const struct kiocb *iocb, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) + loff_t pos, unsigned len, + struct folio **foliop, void **fsdata) { return -EINVAL; } -static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) +static int hugetlbfs_write_end(const struct kiocb *iocb, + struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct folio *folio, void *fsdata) { BUG(); return -EINVAL; } -static void truncate_huge_page(struct page *page) +static void hugetlb_delete_from_page_cache(struct folio *folio) { - cancel_dirty_page(page, /* No IO accounting for huge pages? */0); - ClearPageUptodate(page); - delete_from_page_cache(page); + folio_clear_dirty(folio); + folio_clear_uptodate(folio); + filemap_remove_folio(folio); } -static void truncate_hugepages(struct inode *inode, loff_t lstart) +/* + * Called with i_mmap_rwsem held for inode based vma maps. This makes + * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault + * mutex for the page in the mapping. So, we can not race with page being + * faulted into the vma. + */ +static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn) { - struct hstate *h = hstate_inode(inode); - struct address_space *mapping = &inode->i_data; - const pgoff_t start = lstart >> huge_page_shift(h); - struct pagevec pvec; - pgoff_t next; - int i, freed = 0; + pte_t *ptep, pte; - pagevec_init(&pvec, 0); - next = start; - while (1) { - if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { - if (next == start) - break; - next = start; + ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma))); + if (!ptep) + return false; + + pte = huge_ptep_get(vma->vm_mm, addr, ptep); + if (huge_pte_none(pte) || !pte_present(pte)) + return false; + + if (pte_pfn(pte) == pfn) + return true; + + return false; +} + +/* + * Can vma_offset_start/vma_offset_end overflow on 32-bit arches? + * No, because the interval tree returns us only those vmas + * which overlap the truncated area starting at pgoff, + * and no vma on a 32-bit arch can span beyond the 4GB. 
+ */ +static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start) +{ + unsigned long offset = 0; + + if (vma->vm_pgoff < start) + offset = (start - vma->vm_pgoff) << PAGE_SHIFT; + + return vma->vm_start + offset; +} + +static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end) +{ + unsigned long t_end; + + if (!end) + return vma->vm_end; + + t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start; + if (t_end > vma->vm_end) + t_end = vma->vm_end; + return t_end; +} + +/* + * Called with hugetlb fault mutex held. Therefore, no more mappings to + * this folio can be created while executing the routine. + */ +static void hugetlb_unmap_file_folio(struct hstate *h, + struct address_space *mapping, + struct folio *folio, pgoff_t index) +{ + struct rb_root_cached *root = &mapping->i_mmap; + struct hugetlb_vma_lock *vma_lock; + unsigned long pfn = folio_pfn(folio); + struct vm_area_struct *vma; + unsigned long v_start; + unsigned long v_end; + pgoff_t start, end; + + start = index * pages_per_huge_page(h); + end = (index + 1) * pages_per_huge_page(h); + + i_mmap_lock_write(mapping); +retry: + vma_lock = NULL; + vma_interval_tree_foreach(vma, root, start, end - 1) { + v_start = vma_offset_start(vma, start); + v_end = vma_offset_end(vma, end); + + if (!hugetlb_vma_maps_pfn(vma, v_start, pfn)) continue; + + if (!hugetlb_vma_trylock_write(vma)) { + vma_lock = vma->vm_private_data; + /* + * If we can not get vma lock, we need to drop + * immap_sema and take locks in order. First, + * take a ref on the vma_lock structure so that + * we can be guaranteed it will not go away when + * dropping immap_sema. + */ + kref_get(&vma_lock->refs); + break; } - for (i = 0; i < pagevec_count(&pvec); ++i) { - struct page *page = pvec.pages[i]; + unmap_hugepage_range(vma, v_start, v_end, NULL, + ZAP_FLAG_DROP_MARKER); + hugetlb_vma_unlock_write(vma); + } + + i_mmap_unlock_write(mapping); - lock_page(page); - if (page->index > next) - next = page->index; - ++next; - truncate_huge_page(page); - unlock_page(page); - freed++; + if (vma_lock) { + /* + * Wait on vma_lock. We know it is still valid as we have + * a reference. We must 'open code' vma locking as we do + * not know if vma_lock is still attached to vma. + */ + down_write(&vma_lock->rw_sema); + i_mmap_lock_write(mapping); + + vma = vma_lock->vma; + if (!vma) { + /* + * If lock is no longer attached to vma, then just + * unlock, drop our reference and retry looking for + * other vmas. + */ + up_write(&vma_lock->rw_sema); + kref_put(&vma_lock->refs, hugetlb_vma_lock_release); + goto retry; } - huge_pagevec_release(&pvec); - } - BUG_ON(!lstart && mapping->nrpages); - hugetlb_unreserve_pages(inode, start, freed); -} -static void hugetlbfs_evict_inode(struct inode *inode) -{ - truncate_hugepages(inode, 0); - clear_inode(inode); + /* + * vma_lock is still attached to vma. Check to see if vma + * still maps page and if so, unmap. 
+ */ + v_start = vma_offset_start(vma, start); + v_end = vma_offset_end(vma, end); + if (hugetlb_vma_maps_pfn(vma, v_start, pfn)) + unmap_hugepage_range(vma, v_start, v_end, NULL, + ZAP_FLAG_DROP_MARKER); + + kref_put(&vma_lock->refs, hugetlb_vma_lock_release); + hugetlb_vma_unlock_write(vma); + + goto retry; + } } -static inline void -hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff) +static void +hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end, + zap_flags_t zap_flags) { struct vm_area_struct *vma; - vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) { - unsigned long v_offset; + /* + * end == 0 indicates that the entire range after start should be + * unmapped. Note, end is exclusive, whereas the interval tree takes + * an inclusive "last". + */ + vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) { + unsigned long v_start; + unsigned long v_end; + + if (!hugetlb_vma_trylock_write(vma)) + continue; + + v_start = vma_offset_start(vma, start); + v_end = vma_offset_end(vma, end); + + unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags); /* - * Can the expression below overflow on 32-bit arches? - * No, because the interval tree returns us only those vmas - * which overlap the truncated area starting at pgoff, - * and no vma on a 32-bit arch can span beyond the 4GB. + * Note that vma lock only exists for shared/non-private + * vmas. Therefore, lock is not held when calling + * unmap_hugepage_range for private vmas. */ - if (vma->vm_pgoff < pgoff) - v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT; - else - v_offset = 0; + hugetlb_vma_unlock_write(vma); + } +} + +/* + * Called with hugetlb fault mutex held. + * Returns true if page was actually removed, false otherwise. + */ +static bool remove_inode_single_folio(struct hstate *h, struct inode *inode, + struct address_space *mapping, + struct folio *folio, pgoff_t index, + bool truncate_op) +{ + bool ret = false; + + /* + * If folio is mapped, it was faulted in after being + * unmapped in caller or hugetlb_vmdelete_list() skips + * unmapping it due to fail to grab lock. Unmap (again) + * while holding the fault mutex. The mutex will prevent + * faults until we finish removing the folio. Hold folio + * lock to guarantee no concurrent migration. + */ + folio_lock(folio); + if (unlikely(folio_mapped(folio))) + hugetlb_unmap_file_folio(h, mapping, folio, index); + + /* + * We must remove the folio from page cache before removing + * the region/ reserve map (hugetlb_unreserve_pages). In + * rare out of memory conditions, removal of the region/reserve + * map could fail. Correspondingly, the subpool and global + * reserve usage count can need to be adjusted. + */ + VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio); + hugetlb_delete_from_page_cache(folio); + ret = true; + if (!truncate_op) { + if (unlikely(hugetlb_unreserve_pages(inode, index, + index + 1, 1))) + hugetlb_fix_reserve_counts(inode); + } + + folio_unlock(folio); + return ret; +} + +/* + * remove_inode_hugepages handles two distinct cases: truncation and hole + * punch. There are subtle differences in operation for each case. + * + * truncation is indicated by end of range being LLONG_MAX + * In this case, we first scan the range and release found pages. + * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve + * maps and global counts. Page faults can race with truncation. 
+ * During faults, hugetlb_no_page() checks i_size before page allocation, + * and again after obtaining page table lock. It will 'back out' + * allocations in the truncated range. + * hole punch is indicated if end is not LLONG_MAX + * In the hole punch case we scan the range and release found pages. + * Only when releasing a page is the associated region/reserve map + * deleted. The region/reserve map for ranges without associated + * pages are not modified. Page faults can race with hole punch. + * This is indicated if we find a mapped page. + * Note: If the passed end of range value is beyond the end of file, but + * not LLONG_MAX this routine still performs a hole punch operation. + */ +static void remove_inode_hugepages(struct inode *inode, loff_t lstart, + loff_t lend) +{ + struct hstate *h = hstate_inode(inode); + struct address_space *mapping = &inode->i_data; + const pgoff_t end = lend >> PAGE_SHIFT; + struct folio_batch fbatch; + pgoff_t next, index; + int i, freed = 0; + bool truncate_op = (lend == LLONG_MAX); + + folio_batch_init(&fbatch); + next = lstart >> PAGE_SHIFT; + while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { + for (i = 0; i < folio_batch_count(&fbatch); ++i) { + struct folio *folio = fbatch.folios[i]; + u32 hash = 0; - unmap_hugepage_range(vma, vma->vm_start + v_offset, - vma->vm_end, NULL); + index = folio->index >> huge_page_order(h); + hash = hugetlb_fault_mutex_hash(mapping, index); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + /* + * Remove folio that was part of folio_batch. + */ + if (remove_inode_single_folio(h, inode, mapping, folio, + index, truncate_op)) + freed++; + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + } + folio_batch_release(&fbatch); + cond_resched(); } + + if (truncate_op) + (void)hugetlb_unreserve_pages(inode, + lstart >> huge_page_shift(h), + LONG_MAX, freed); } -static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) +static void hugetlbfs_evict_inode(struct inode *inode) +{ + struct resv_map *resv_map; + + trace_hugetlbfs_evict_inode(inode); + remove_inode_hugepages(inode, 0, LLONG_MAX); + + /* + * Get the resv_map from the address space embedded in the inode. + * This is the address space which points to any resv_map allocated + * at inode creation time. If this is a device special inode, + * i_mapping may not point to the original address space. 
+ */ + resv_map = (struct resv_map *)(&inode->i_data)->i_private_data; + /* Only regular and link inodes have associated reserve maps */ + if (resv_map) + resv_map_release(&resv_map->refs); + clear_inode(inode); +} + +static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; @@ -404,56 +645,266 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset) pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); - mutex_lock(&mapping->i_mmap_mutex); - if (!RB_EMPTY_ROOT(&mapping->i_mmap)) - hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff); - mutex_unlock(&mapping->i_mmap_mutex); - truncate_hugepages(inode, offset); + i_mmap_lock_write(mapping); + if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) + hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0, + ZAP_FLAG_DROP_MARKER); + i_mmap_unlock_write(mapping); + remove_inode_hugepages(inode, offset, LLONG_MAX); +} + +static void hugetlbfs_zero_partial_page(struct hstate *h, + struct address_space *mapping, + loff_t start, + loff_t end) +{ + pgoff_t idx = start >> huge_page_shift(h); + struct folio *folio; + + folio = filemap_lock_hugetlb_folio(h, mapping, idx); + if (IS_ERR(folio)) + return; + + start = start & ~huge_page_mask(h); + end = end & ~huge_page_mask(h); + if (!end) + end = huge_page_size(h); + + folio_zero_segment(folio, (size_t)start, (size_t)end); + + folio_unlock(folio); + folio_put(folio); +} + +static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) +{ + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + struct address_space *mapping = inode->i_mapping; + struct hstate *h = hstate_inode(inode); + loff_t hpage_size = huge_page_size(h); + loff_t hole_start, hole_end; + + /* + * hole_start and hole_end indicate the full pages within the hole. + */ + hole_start = round_up(offset, hpage_size); + hole_end = round_down(offset + len, hpage_size); + + inode_lock(inode); + + /* protected by i_rwsem */ + if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { + inode_unlock(inode); + return -EPERM; + } + + i_mmap_lock_write(mapping); + + /* If range starts before first full page, zero partial page. */ + if (offset < hole_start) + hugetlbfs_zero_partial_page(h, mapping, + offset, min(offset + len, hole_start)); + + /* Unmap users of full pages in the hole. */ + if (hole_end > hole_start) { + if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) + hugetlb_vmdelete_list(&mapping->i_mmap, + hole_start >> PAGE_SHIFT, + hole_end >> PAGE_SHIFT, 0); + } + + /* If range extends beyond last full page, zero partial page. */ + if ((offset + len) > hole_end && (offset + len) > hole_start) + hugetlbfs_zero_partial_page(h, mapping, + hole_end, offset + len); + + i_mmap_unlock_write(mapping); + + /* Remove full pages from the file. 
*/ + if (hole_end > hole_start) + remove_inode_hugepages(inode, hole_start, hole_end); + + inode_unlock(inode); + return 0; } -static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr) +static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) { - struct inode *inode = dentry->d_inode; + struct inode *inode = file_inode(file); + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); + struct vm_area_struct pseudo_vma; + struct mm_struct *mm = current->mm; + loff_t hpage_size = huge_page_size(h); + unsigned long hpage_shift = huge_page_shift(h); + pgoff_t start, index, end; int error; - unsigned int ia_valid = attr->ia_valid; + u32 hash; + + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + if (mode & FALLOC_FL_PUNCH_HOLE) { + error = hugetlbfs_punch_hole(inode, offset, len); + goto out_nolock; + } + + /* + * Default preallocate case. + * For this range, start is rounded down and end is rounded up + * as well as being converted to page offsets. + */ + start = offset >> hpage_shift; + end = (offset + len + hpage_size - 1) >> hpage_shift; + + inode_lock(inode); + + /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ + error = inode_newsize_ok(inode, offset + len); + if (error) + goto out; + + if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { + error = -EPERM; + goto out; + } + + /* + * Initialize a pseudo vma as this is required by the huge page + * allocation routines. + */ + vma_init(&pseudo_vma, mm); + vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED); + pseudo_vma.vm_file = file; - BUG_ON(!inode); + for (index = start; index < end; index++) { + /* + * This is supposed to be the vaddr where the page is being + * faulted in, but we have no vaddr here. + */ + struct folio *folio; + unsigned long addr; + + cond_resched(); + + /* + * fallocate(2) manpage permits EINTR; we may have been + * interrupted because we are using up too much memory. + */ + if (signal_pending(current)) { + error = -EINTR; + break; + } + + /* addr is the offset within the file (zero based) */ + addr = index * hpage_size; + + /* mutex taken here, fault path and hole punch */ + hash = hugetlb_fault_mutex_hash(mapping, index); + mutex_lock(&hugetlb_fault_mutex_table[hash]); + + /* See if already present in mapping to avoid alloc/free */ + folio = filemap_get_folio(mapping, index << huge_page_order(h)); + if (!IS_ERR(folio)) { + folio_put(folio); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + continue; + } + + /* + * Allocate folio without setting the avoid_reserve argument. + * There certainly are no reserves associated with the + * pseudo_vma. However, there could be shared mappings with + * reserves for the file at the inode level. If we fallocate + * folios in these areas, we need to consume the reserves + * to keep reservation accounting consistent. 
+ */ + folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); + if (IS_ERR(folio)) { + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + error = PTR_ERR(folio); + goto out; + } + folio_zero_user(folio, addr); + __folio_mark_uptodate(folio); + error = hugetlb_add_to_page_cache(folio, mapping, index); + if (unlikely(error)) { + restore_reserve_on_error(h, &pseudo_vma, addr, folio); + folio_put(folio); + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + goto out; + } + + mutex_unlock(&hugetlb_fault_mutex_table[hash]); + + folio_set_hugetlb_migratable(folio); + /* + * folio_unlock because locked by hugetlb_add_to_page_cache() + * folio_put() due to reference from alloc_hugetlb_folio() + */ + folio_unlock(folio); + folio_put(folio); + } + + if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) + i_size_write(inode, offset + len); + inode_set_ctime_current(inode); +out: + inode_unlock(inode); + +out_nolock: + trace_hugetlbfs_fallocate(inode, mode, offset, len, error); + return error; +} - error = inode_change_ok(inode, attr); +static int hugetlbfs_setattr(struct mnt_idmap *idmap, + struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = d_inode(dentry); + struct hstate *h = hstate_inode(inode); + int error; + unsigned int ia_valid = attr->ia_valid; + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + + error = setattr_prepare(idmap, dentry, attr); if (error) return error; + trace_hugetlbfs_setattr(inode, dentry, attr); + if (ia_valid & ATTR_SIZE) { - error = -EINVAL; - if (attr->ia_size & ~huge_page_mask(h)) + loff_t oldsize = inode->i_size; + loff_t newsize = attr->ia_size; + + if (newsize & ~huge_page_mask(h)) return -EINVAL; - error = hugetlb_vmtruncate(inode, attr->ia_size); - if (error) - return error; + /* protected by i_rwsem */ + if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || + (newsize > oldsize && (info->seals & F_SEAL_GROW))) + return -EPERM; + hugetlb_vmtruncate(inode, newsize); } - setattr_copy(inode, attr); + setattr_copy(idmap, inode, attr); mark_inode_dirty(inode); return 0; } static struct inode *hugetlbfs_get_root(struct super_block *sb, - struct hugetlbfs_config *config) + struct hugetlbfs_fs_context *ctx) { struct inode *inode; inode = new_inode(sb); if (inode) { - struct hugetlbfs_inode_info *info; inode->i_ino = get_next_ino(); - inode->i_mode = S_IFDIR | config->mode; - inode->i_uid = config->uid; - inode->i_gid = config->gid; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - info = HUGETLBFS_I(inode); - mpol_shared_policy_init(&info->policy, NULL); + inode->i_mode = S_IFDIR | ctx->mode; + inode->i_uid = ctx->uid; + inode->i_gid = ctx->gid; + simple_inode_init_ts(inode); inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ @@ -463,30 +914,44 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, return inode; } +/* + * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never + * be taken from reclaim -- unlike regular filesystems. This needs an + * annotation because huge_pmd_share() does an allocation under hugetlb's + * i_mmap_rwsem. + */ +static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; + static struct inode *hugetlbfs_get_inode(struct super_block *sb, + struct mnt_idmap *idmap, struct inode *dir, umode_t mode, dev_t dev) { struct inode *inode; + struct resv_map *resv_map = NULL; + + /* + * Reserve maps are only needed for inodes that can have associated + * page allocations. 
+ */ + if (S_ISREG(mode) || S_ISLNK(mode)) { + resv_map = resv_map_alloc(); + if (!resv_map) + return NULL; + } inode = new_inode(sb); if (inode) { - struct hugetlbfs_inode_info *info; + struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); + inode->i_ino = get_next_ino(); - inode_init_owner(inode, dir, mode); + inode_init_owner(idmap, inode, dir, mode); + lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, + &hugetlbfs_i_mmap_rwsem_key); inode->i_mapping->a_ops = &hugetlbfs_aops; - inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - INIT_LIST_HEAD(&inode->i_mapping->private_list); - info = HUGETLBFS_I(inode); - /* - * The policy is initialized here even if we are creating a - * private inode because initialization simply creates an - * an empty rb tree and calls spin_lock_init(), later when we - * call mpol_free_shared_policy() it will just return because - * the rb tree will still be empty. - */ - mpol_shared_policy_init(&info->policy, NULL); + simple_inode_init_ts(inode); + inode->i_mapping->i_private_data = resv_map; + info->seals = F_SEAL_SEAL; switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); @@ -504,111 +969,181 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; + inode_nohighmem(inode); break; } lockdep_annotate_inode_mutex_key(inode); + trace_hugetlbfs_alloc_inode(inode, dir, mode); + } else { + if (resv_map) + kref_put(&resv_map->refs, resv_map_release); } + return inode; } /* * File creation. Allocate an inode, and we're done.. */ -static int hugetlbfs_mknod(struct inode *dir, - struct dentry *dentry, umode_t mode, dev_t dev) +static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; - int error = -ENOSPC; - inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev); - if (inode) { - dir->i_ctime = dir->i_mtime = CURRENT_TIME; - d_instantiate(dentry, inode); - dget(dentry); /* Extra count - pin the dentry in core */ - error = 0; - } - return error; + inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev); + if (!inode) + return -ENOSPC; + inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); + d_make_persistent(dentry, inode); + return 0; } -static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode) { - int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0); + int retval = hugetlbfs_mknod(idmap, dir, dentry, + mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); - return retval; + return ERR_PTR(retval); } -static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) +static int hugetlbfs_create(struct mnt_idmap *idmap, + struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) { - return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0); + return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0); } -static int hugetlbfs_symlink(struct inode *dir, - struct dentry *dentry, const char *symname) +static int hugetlbfs_tmpfile(struct mnt_idmap *idmap, + struct inode *dir, struct file *file, + umode_t mode) { struct inode *inode; + + inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0); + if (!inode) + return -ENOSPC; + inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); + d_tmpfile(file, 
inode); + return finish_open_simple(file, 0); +} + +static int hugetlbfs_symlink(struct mnt_idmap *idmap, + struct inode *dir, struct dentry *dentry, + const char *symname) +{ + const umode_t mode = S_IFLNK|S_IRWXUGO; + struct inode *inode; int error = -ENOSPC; - inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0); + inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); - if (!error) { - d_instantiate(dentry, inode); - dget(dentry); - } else + if (!error) + d_make_persistent(dentry, inode); + else iput(inode); } - dir->i_ctime = dir->i_mtime = CURRENT_TIME; + inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); return error; } -/* - * mark the head page dirty - */ -static int hugetlbfs_set_page_dirty(struct page *page) +#ifdef CONFIG_MIGRATION +static int hugetlbfs_migrate_folio(struct address_space *mapping, + struct folio *dst, struct folio *src, + enum migrate_mode mode) { - struct page *head = compound_head(page); + int rc; + + rc = migrate_huge_page_move_mapping(mapping, dst, src); + if (rc) + return rc; + + if (hugetlb_folio_subpool(src)) { + hugetlb_set_folio_subpool(dst, + hugetlb_folio_subpool(src)); + hugetlb_set_folio_subpool(src, NULL); + } + + folio_migrate_flags(dst, src); - SetPageDirty(head); return 0; } +#else +#define hugetlbfs_migrate_folio NULL +#endif -static int hugetlbfs_migrate_page(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode) +static int hugetlbfs_error_remove_folio(struct address_space *mapping, + struct folio *folio) { - int rc; - - rc = migrate_huge_page_move_mapping(mapping, newpage, page); - if (rc != MIGRATEPAGE_SUCCESS) - return rc; - migrate_page_copy(newpage, page); + return 0; +} - return MIGRATEPAGE_SUCCESS; +/* + * Display the mount options in /proc/mounts. 
+ */ +static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) +{ + struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); + struct hugepage_subpool *spool = sbinfo->spool; + unsigned long hpage_size = huge_page_size(sbinfo->hstate); + unsigned hpage_shift = huge_page_shift(sbinfo->hstate); + char mod; + + if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) + seq_printf(m, ",uid=%u", + from_kuid_munged(&init_user_ns, sbinfo->uid)); + if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) + seq_printf(m, ",gid=%u", + from_kgid_munged(&init_user_ns, sbinfo->gid)); + if (sbinfo->mode != 0755) + seq_printf(m, ",mode=%o", sbinfo->mode); + if (sbinfo->max_inodes != -1) + seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); + + hpage_size /= 1024; + mod = 'K'; + if (hpage_size >= 1024) { + hpage_size /= 1024; + mod = 'M'; + } + seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); + if (spool) { + if (spool->max_hpages != -1) + seq_printf(m, ",size=%llu", + (unsigned long long)spool->max_hpages << hpage_shift); + if (spool->min_hpages != -1) + seq_printf(m, ",min_size=%llu", + (unsigned long long)spool->min_hpages << hpage_shift); + } + return 0; } static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); - struct hstate *h = hstate_inode(dentry->d_inode); + struct hstate *h = hstate_inode(d_inode(dentry)); + u64 id = huge_encode_dev(dentry->d_sb->s_dev); + buf->f_fsid = u64_to_fsid(id); buf->f_type = HUGETLBFS_MAGIC; buf->f_bsize = huge_page_size(h); if (sbinfo) { spin_lock(&sbinfo->stat_lock); - /* If no limits set, just report 0 for max/free/used + /* If no limits set, just report 0 or -1 for max/free/used * blocks, like simple_statfs() */ if (sbinfo->spool) { long free_pages; - spin_lock(&sbinfo->spool->lock); + spin_lock_irq(&sbinfo->spool->lock); buf->f_blocks = sbinfo->spool->max_hpages; free_pages = sbinfo->spool->max_hpages - sbinfo->spool->used_hpages; buf->f_bavail = buf->f_bfree = free_pages; - spin_unlock(&sbinfo->spool->lock); + spin_unlock_irq(&sbinfo->spool->lock); buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_inodes; } @@ -666,7 +1201,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; - p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); + p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; @@ -674,40 +1209,41 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) return &p->vfs_inode; } -static void hugetlbfs_i_callback(struct rcu_head *head) +static void hugetlbfs_free_inode(struct inode *inode) { - struct inode *inode = container_of(head, struct inode, i_rcu); + trace_hugetlbfs_free_inode(inode); kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); } static void hugetlbfs_destroy_inode(struct inode *inode) { hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); - mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy); - call_rcu(&inode->i_rcu, hugetlbfs_i_callback); } static const struct address_space_operations hugetlbfs_aops = { .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, - .set_page_dirty = hugetlbfs_set_page_dirty, - .migratepage = hugetlbfs_migrate_page, + .dirty_folio = noop_dirty_folio, + .migrate_folio = hugetlbfs_migrate_folio, + .error_remove_folio = hugetlbfs_error_remove_folio, }; static void init_once(void *foo) { - struct hugetlbfs_inode_info *ei = (struct 
hugetlbfs_inode_info *)foo; + struct hugetlbfs_inode_info *ei = foo; inode_init_once(&ei->vfs_inode); } -const struct file_operations hugetlbfs_file_operations = { - .read = hugetlbfs_read, - .mmap = hugetlbfs_file_mmap, +static const struct file_operations hugetlbfs_file_operations = { + .read_iter = hugetlbfs_read_iter, + .mmap_prepare = hugetlbfs_file_mmap_prepare, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, - .llseek = default_llseek, + .llseek = default_llseek, + .fallocate = hugetlbfs_fallocate, + .fop_flags = FOP_HUGE_PAGES, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { @@ -721,6 +1257,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations = { .mknod = hugetlbfs_mknod, .rename = simple_rename, .setattr = hugetlbfs_setattr, + .tmpfile = hugetlbfs_tmpfile, }; static const struct inode_operations hugetlbfs_inode_operations = { @@ -729,174 +1266,243 @@ static const struct inode_operations hugetlbfs_inode_operations = { static const struct super_operations hugetlbfs_ops = { .alloc_inode = hugetlbfs_alloc_inode, + .free_inode = hugetlbfs_free_inode, .destroy_inode = hugetlbfs_destroy_inode, .evict_inode = hugetlbfs_evict_inode, .statfs = hugetlbfs_statfs, .put_super = hugetlbfs_put_super, - .show_options = generic_show_options, + .show_options = hugetlbfs_show_options, }; -static int -hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig) +/* + * Convert size option passed from command line to number of huge pages + * in the pool specified by hstate. Size option could be in bytes + * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). + */ +static long +hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, + enum hugetlbfs_size_type val_type) { - char *p, *rest; - substring_t args[MAX_OPT_ARGS]; - int option; - unsigned long long size = 0; - enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE; + if (val_type == NO_SIZE) + return -1; - if (!options) - return 0; + if (val_type == SIZE_PERCENT) { + size_opt <<= huge_page_shift(h); + size_opt *= h->max_huge_pages; + do_div(size_opt, 100); + } - while ((p = strsep(&options, ",")) != NULL) { - int token; - if (!*p) - continue; + size_opt >>= huge_page_shift(h); + return size_opt; +} - token = match_token(p, tokens, args); - switch (token) { - case Opt_uid: - if (match_int(&args[0], &option)) - goto bad_val; - pconfig->uid = make_kuid(current_user_ns(), option); - if (!uid_valid(pconfig->uid)) - goto bad_val; - break; +/* + * Parse one mount parameter. 
+ */ +static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct hugetlbfs_fs_context *ctx = fc->fs_private; + struct fs_parse_result result; + struct hstate *h; + char *rest; + unsigned long ps; + int opt; - case Opt_gid: - if (match_int(&args[0], &option)) - goto bad_val; - pconfig->gid = make_kgid(current_user_ns(), option); - if (!gid_valid(pconfig->gid)) - goto bad_val; - break; + opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); + if (opt < 0) + return opt; - case Opt_mode: - if (match_octal(&args[0], &option)) - goto bad_val; - pconfig->mode = option & 01777U; - break; + switch (opt) { + case Opt_uid: + ctx->uid = result.uid; + return 0; - case Opt_size: { - /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(*args[0].from)) - goto bad_val; - size = memparse(args[0].from, &rest); - setsize = SIZE_STD; - if (*rest == '%') - setsize = SIZE_PERCENT; - break; - } + case Opt_gid: + ctx->gid = result.gid; + return 0; - case Opt_nr_inodes: - /* memparse() will accept a K/M/G without a digit */ - if (!isdigit(*args[0].from)) - goto bad_val; - pconfig->nr_inodes = memparse(args[0].from, &rest); - break; + case Opt_mode: + ctx->mode = result.uint_32 & 01777U; + return 0; - case Opt_pagesize: { - unsigned long ps; - ps = memparse(args[0].from, &rest); - pconfig->hstate = size_to_hstate(ps); - if (!pconfig->hstate) { - printk(KERN_ERR - "hugetlbfs: Unsupported page size %lu MB\n", - ps >> 20); - return -EINVAL; - } - break; - } + case Opt_size: + /* memparse() will accept a K/M/G without a digit */ + if (!param->string || !isdigit(param->string[0])) + goto bad_val; + ctx->max_size_opt = memparse(param->string, &rest); + ctx->max_val_type = SIZE_STD; + if (*rest == '%') + ctx->max_val_type = SIZE_PERCENT; + return 0; - default: - printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n", - p); + case Opt_nr_inodes: + /* memparse() will accept a K/M/G without a digit */ + if (!param->string || !isdigit(param->string[0])) + goto bad_val; + ctx->nr_inodes = memparse(param->string, &rest); + return 0; + + case Opt_pagesize: + ps = memparse(param->string, &rest); + h = size_to_hstate(ps); + if (!h) { + pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); return -EINVAL; - break; } + ctx->hstate = h; + return 0; + + case Opt_min_size: + /* memparse() will accept a K/M/G without a digit */ + if (!param->string || !isdigit(param->string[0])) + goto bad_val; + ctx->min_size_opt = memparse(param->string, &rest); + ctx->min_val_type = SIZE_STD; + if (*rest == '%') + ctx->min_val_type = SIZE_PERCENT; + return 0; + + default: + return -EINVAL; } - /* Do size after hstate is set up */ - if (setsize > NO_SIZE) { - struct hstate *h = pconfig->hstate; - if (setsize == SIZE_PERCENT) { - size <<= huge_page_shift(h); - size *= h->max_huge_pages; - do_div(size, 100); - } - pconfig->nr_blocks = (size >> huge_page_shift(h)); +bad_val: + return invalfc(fc, "Bad value '%s' for mount option '%s'\n", + param->string, param->key); +} + +/* + * Validate the parsed options. + */ +static int hugetlbfs_validate(struct fs_context *fc) +{ + struct hugetlbfs_fs_context *ctx = fc->fs_private; + + /* + * Use huge page pool size (in hstate) to convert the size + * options to number of huge pages. If NO_SIZE, -1 is returned. 
+ */ + ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, + ctx->max_size_opt, + ctx->max_val_type); + ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, + ctx->min_size_opt, + ctx->min_val_type); + + /* + * If max_size was specified, then min_size must be smaller + */ + if (ctx->max_val_type > NO_SIZE && + ctx->min_hpages > ctx->max_hpages) { + pr_err("Minimum size can not be greater than maximum size\n"); + return -EINVAL; } return 0; - -bad_val: - printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n", - args[0].from, p); - return -EINVAL; } static int -hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) +hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) { - int ret; - struct hugetlbfs_config config; + struct hugetlbfs_fs_context *ctx = fc->fs_private; struct hugetlbfs_sb_info *sbinfo; - save_mount_options(sb, data); - - config.nr_blocks = -1; /* No limit on size by default */ - config.nr_inodes = -1; /* No limit on number of inodes by default */ - config.uid = current_fsuid(); - config.gid = current_fsgid(); - config.mode = 0755; - config.hstate = &default_hstate; - ret = hugetlbfs_parse_options(data, &config); - if (ret) - return ret; - sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); if (!sbinfo) return -ENOMEM; sb->s_fs_info = sbinfo; - sbinfo->hstate = config.hstate; spin_lock_init(&sbinfo->stat_lock); - sbinfo->max_inodes = config.nr_inodes; - sbinfo->free_inodes = config.nr_inodes; - sbinfo->spool = NULL; - if (config.nr_blocks != -1) { - sbinfo->spool = hugepage_new_subpool(config.nr_blocks); + sbinfo->hstate = ctx->hstate; + sbinfo->max_inodes = ctx->nr_inodes; + sbinfo->free_inodes = ctx->nr_inodes; + sbinfo->spool = NULL; + sbinfo->uid = ctx->uid; + sbinfo->gid = ctx->gid; + sbinfo->mode = ctx->mode; + + /* + * Allocate and initialize subpool if maximum or minimum size is + * specified. Any needed reservations (for minimum size) are taken + * when the subpool is created. + */ + if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { + sbinfo->spool = hugepage_new_subpool(ctx->hstate, + ctx->max_hpages, + ctx->min_hpages); if (!sbinfo->spool) goto out_free; } sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = huge_page_size(config.hstate); - sb->s_blocksize_bits = huge_page_shift(config.hstate); + sb->s_blocksize = huge_page_size(ctx->hstate); + sb->s_blocksize_bits = huge_page_shift(ctx->hstate); sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; + sb->s_d_flags = DCACHE_DONTCACHE; sb->s_time_gran = 1; - sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config)); + + /* + * Due to the special and limited functionality of hugetlbfs, it does + * not work well as a stacking filesystem. 
+ */ + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; + sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; return 0; out_free: - if (sbinfo->spool) - kfree(sbinfo->spool); + kfree(sbinfo->spool); kfree(sbinfo); return -ENOMEM; } -static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int hugetlbfs_get_tree(struct fs_context *fc) +{ + int err = hugetlbfs_validate(fc); + if (err) + return err; + return get_tree_nodev(fc, hugetlbfs_fill_super); +} + +static void hugetlbfs_fs_context_free(struct fs_context *fc) { - return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super); + kfree(fc->fs_private); +} + +static const struct fs_context_operations hugetlbfs_fs_context_ops = { + .free = hugetlbfs_fs_context_free, + .parse_param = hugetlbfs_parse_param, + .get_tree = hugetlbfs_get_tree, +}; + +static int hugetlbfs_init_fs_context(struct fs_context *fc) +{ + struct hugetlbfs_fs_context *ctx; + + ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->max_hpages = -1; /* No limit on size by default */ + ctx->nr_inodes = -1; /* No limit on number of inodes by default */ + ctx->uid = current_fsuid(); + ctx->gid = current_fsgid(); + ctx->mode = 0755; + ctx->hstate = &default_hstate; + ctx->min_hpages = -1; /* No default minimum size */ + ctx->max_val_type = NO_SIZE; + ctx->min_val_type = NO_SIZE; + fc->fs_private = ctx; + fc->ops = &hugetlbfs_fs_context_ops; + return 0; } static struct file_system_type hugetlbfs_fs_type = { - .name = "hugetlbfs", - .mount = hugetlbfs_mount, - .kill_sb = kill_litter_super, + .name = "hugetlbfs", + .init_fs_context = hugetlbfs_init_fs_context, + .parameters = hugetlb_fs_parameters, + .kill_sb = kill_anon_super, + .fs_flags = FS_ALLOW_IDMAP, }; -MODULE_ALIAS_FS("hugetlbfs"); static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; @@ -913,169 +1519,142 @@ static int get_hstate_idx(int page_size_log) if (!h) return -1; - return h - hstates; + return hstate_index(h); } -static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen) -{ - return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", - dentry->d_name.name); -} - -static struct dentry_operations anon_ops = { - .d_dname = hugetlb_dname -}; - /* * Note that size should be aligned to proper hugepage size in caller side, * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. 
*/ struct file *hugetlb_file_setup(const char *name, size_t size, - vm_flags_t acctflag, struct user_struct **user, - int creat_flags, int page_size_log) + vm_flags_t acctflag, int creat_flags, + int page_size_log) { - struct file *file = ERR_PTR(-ENOMEM); struct inode *inode; - struct path path; - struct super_block *sb; - struct qstr quick_string; + struct vfsmount *mnt; int hstate_idx; + struct file *file; hstate_idx = get_hstate_idx(page_size_log); if (hstate_idx < 0) return ERR_PTR(-ENODEV); - *user = NULL; - if (!hugetlbfs_vfsmount[hstate_idx]) + mnt = hugetlbfs_vfsmount[hstate_idx]; + if (!mnt) return ERR_PTR(-ENOENT); if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { - *user = current_user(); - if (user_shm_lock(size, *user)) { - task_lock(current); - printk_once(KERN_WARNING - "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n", + struct ucounts *ucounts = current_ucounts(); + + if (user_shm_lock(size, ucounts)) { + pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", current->comm, current->pid); - task_unlock(current); - } else { - *user = NULL; - return ERR_PTR(-EPERM); + user_shm_unlock(size, ucounts); } + return ERR_PTR(-EPERM); } - sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb; - quick_string.name = name; - quick_string.len = strlen(quick_string.name); - quick_string.hash = 0; - path.dentry = d_alloc_pseudo(sb, &quick_string); - if (!path.dentry) - goto out_shm_unlock; - - d_set_d_op(path.dentry, &anon_ops); - path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]); file = ERR_PTR(-ENOSPC); - inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); + /* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. */ + inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL, + S_IFREG | S_IRWXUGO, 0); if (!inode) - goto out_dentry; - - file = ERR_PTR(-ENOMEM); - if (hugetlb_reserve_pages(inode, 0, - size >> huge_page_shift(hstate_inode(inode)), NULL, - acctflag)) - goto out_inode; + goto out; + if (creat_flags == HUGETLB_SHMFS_INODE) + inode->i_flags |= S_PRIVATE; - d_instantiate(path.dentry, inode); inode->i_size = size; clear_nlink(inode); - file = alloc_file(&path, FMODE_WRITE | FMODE_READ, - &hugetlbfs_file_operations); - if (IS_ERR(file)) - goto out_dentry; /* inode is already attached */ + if (hugetlb_reserve_pages(inode, 0, + size >> huge_page_shift(hstate_inode(inode)), NULL, + acctflag) < 0) + file = ERR_PTR(-ENOMEM); + else + file = alloc_file_pseudo(inode, mnt, name, O_RDWR, + &hugetlbfs_file_operations); + if (!IS_ERR(file)) + return file; + iput(inode); +out: return file; +} -out_inode: - iput(inode); -out_dentry: - path_put(&path); -out_shm_unlock: - if (*user) { - user_shm_unlock(size, *user); - *user = NULL; +static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) +{ + struct fs_context *fc; + struct vfsmount *mnt; + + fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); + if (IS_ERR(fc)) { + mnt = ERR_CAST(fc); + } else { + struct hugetlbfs_fs_context *ctx = fc->fs_private; + ctx->hstate = h; + mnt = fc_mount_longterm(fc); + put_fs_context(fc); } - return file; + if (IS_ERR(mnt)) + pr_err("Cannot mount internal hugetlbfs for page size %luK", + huge_page_size(h) / SZ_1K); + return mnt; } static int __init init_hugetlbfs_fs(void) { + struct vfsmount *mnt; struct hstate *h; int error; int i; - error = bdi_init(&hugetlbfs_backing_dev_info); - if (error) - return error; + if (!hugepages_supported()) { + pr_info("disabling because there are no supported hugepage sizes\n"); + return -ENOTSUPP; + } 
error = -ENOMEM; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), - 0, 0, init_once); + 0, SLAB_ACCOUNT, init_once); if (hugetlbfs_inode_cachep == NULL) - goto out2; + goto out; error = register_filesystem(&hugetlbfs_fs_type); if (error) - goto out; + goto out_free; + /* default hstate mount is required */ + mnt = mount_one_hugetlbfs(&default_hstate); + if (IS_ERR(mnt)) { + error = PTR_ERR(mnt); + goto out_unreg; + } + hugetlbfs_vfsmount[default_hstate_idx] = mnt; + + /* other hstates are optional */ i = 0; for_each_hstate(h) { - char buf[50]; - unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10); - - snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb); - hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type, - buf); + if (i == default_hstate_idx) { + i++; + continue; + } - if (IS_ERR(hugetlbfs_vfsmount[i])) { - pr_err("hugetlb: Cannot mount internal hugetlbfs for " - "page size %uK", ps_kb); - error = PTR_ERR(hugetlbfs_vfsmount[i]); + mnt = mount_one_hugetlbfs(h); + if (IS_ERR(mnt)) hugetlbfs_vfsmount[i] = NULL; - } + else + hugetlbfs_vfsmount[i] = mnt; i++; } - /* Non default hstates are optional */ - if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx])) - return 0; - - out: - kmem_cache_destroy(hugetlbfs_inode_cachep); - out2: - bdi_destroy(&hugetlbfs_backing_dev_info); - return error; -} - -static void __exit exit_hugetlbfs_fs(void) -{ - struct hstate *h; - int i; + return 0; - /* - * Make sure all delayed rcu free inodes are flushed before we - * destroy cache. - */ - rcu_barrier(); + out_unreg: + (void)unregister_filesystem(&hugetlbfs_fs_type); + out_free: kmem_cache_destroy(hugetlbfs_inode_cachep); - i = 0; - for_each_hstate(h) - kern_unmount(hugetlbfs_vfsmount[i++]); - unregister_filesystem(&hugetlbfs_fs_type); - bdi_destroy(&hugetlbfs_backing_dev_info); + out: + return error; } - -module_init(init_hugetlbfs_fs) -module_exit(exit_hugetlbfs_fs) - -MODULE_LICENSE("GPL"); +fs_initcall(init_hugetlbfs_fs) |
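
As a rough illustration of the size=/min_size= handling introduced above, the sketch below redoes the arithmetic of hugetlbfs_size_to_hpages() as a standalone userspace program: a plain byte count is divided by the huge page size, while a percentage is first scaled against the current pool. This is not kernel code; the 2 MiB page size and 512-page pool are assumptions chosen for the example, not values taken from the patch.

/*
 * Standalone sketch of the hugetlbfs_size_to_hpages() arithmetic above.
 * Not kernel code; the page shift and pool size are example assumptions.
 */
#include <stdio.h>

enum size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

static long size_to_hpages(unsigned int hpage_shift, unsigned long pool_pages,
			   unsigned long long size_opt, enum size_type type)
{
	if (type == NO_SIZE)
		return -1;			/* "no limit", as in the kernel */

	if (type == SIZE_PERCENT) {
		/* percentage of the current pool, expressed in bytes */
		size_opt <<= hpage_shift;
		size_opt *= pool_pages;
		size_opt /= 100;
	}

	return size_opt >> hpage_shift;		/* bytes -> huge pages, rounded down */
}

int main(void)
{
	unsigned int shift = 21;	/* assume 2 MiB huge pages */
	unsigned long pool = 512;	/* assume a 512-page pool */

	printf("size=1G   -> %ld huge pages\n",
	       size_to_hpages(shift, pool, 1ULL << 30, SIZE_STD));
	printf("size=50%%  -> %ld huge pages\n",
	       size_to_hpages(shift, pool, 50, SIZE_PERCENT));
	printf("no size   -> %ld (no limit)\n",
	       size_to_hpages(shift, pool, 0, NO_SIZE));
	return 0;
}

With these assumptions, size=1G and size=50% both resolve to a limit of 512 and 256 huge pages respectively, matching what the mount-time conversion in the patch would compute.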
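The fallocate() support added above (hugetlbfs_fallocate() and hugetlbfs_punch_hole()) is driven from userspace through fallocate(2). A minimal, hypothetical caller might look like the following; the /dev/hugepages mount point, the file name and the 2 MiB page size are assumptions for the example, not anything mandated by the patch.

/*
 * Hypothetical userspace caller exercising the fallocate() paths above.
 * Mount point, file name and huge page size are example assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const off_t hpage = 2UL << 20;		/* assume 2 MiB huge pages */
	int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Preallocate four huge pages worth of file space. */
	if (fallocate(fd, 0, 0, 4 * hpage))
		perror("fallocate(preallocate)");

	/* Punch the second huge page back out without shrinking the file. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      hpage, hpage))
		perror("fallocate(punch hole)");

	close(fd);
	return 0;
}

As in the kernel code above, FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE, and only whole huge pages inside the hole are removed from the file; partial pages at either end of the range are merely zeroed by hugetlbfs_zero_partial_page().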