From: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Subject: mm/mmap: use advanced maple tree API for mmap_region()

Changing mmap_region() to use the maple tree state and the advanced maple
tree interface allows for a lot less tree walking.

This change removes the last caller of munmap_vma_range(), so drop this
unused function.

Add vma_expand() to expand a VMA if possible by doing the necessary
hugepage check, uprobe_munmap of files, dcache flush, modifications then
undoing the detaches, etc.

Link: https://lkml.kernel.org/r/20220426150616.3937571-26-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Catalin Marinas
Cc: David Howells
Cc: "Matthew Wilcox (Oracle)"
Cc: Vlastimil Babka
Cc: Will Deacon
Cc: Yu Zhao
Signed-off-by: Andrew Morton
---

 mm/mmap.c | 245 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 198 insertions(+), 47 deletions(-)

--- a/mm/mmap.c~mm-mmap-use-advanced-maple-tree-api-for-mmap_region
+++ a/mm/mmap.c
@@ -516,28 +516,6 @@ static inline struct vm_area_struct *__v
 	return vma->vm_next;
 }
 
-/*
- * munmap_vma_range() - munmap VMAs that overlap a range.
- * @mm: The mm struct
- * @start: The start of the range.
- * @len: The length of the range.
- * @pprev: pointer to the pointer that will be set to previous vm_area_struct
- *
- * Find all the vm_area_struct that overlap from @start to
- * @end and munmap them.  Set @pprev to the previous vm_area_struct.
- *
- * Returns: -ENOMEM on munmap failure or 0 on success.
- */
-static inline int
-munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
-		 struct vm_area_struct **pprev, struct list_head *uf)
-{
-	while (range_has_overlap(mm, start, start + len, pprev))
-		if (do_munmap(mm, start, len, uf))
-			return -ENOMEM;
-	return 0;
-}
-
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
 		unsigned long addr, unsigned long end)
 {
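For review context: the dropped loop above is replaced in mmap_region()
by one do_munmap() call plus two maple state lookups.  A minimal sketch
of the new pattern, using only names introduced by this patch:

	MA_STATE(mas, &mm->mm_mt, addr, end - 1);

	if (do_munmap(mm, addr, len, uf))	/* clears any overlapping VMAs */
		return -ENOMEM;

	next = mas_next(&mas, ULONG_MAX);	/* first VMA after the range */
	prev = mas_prev(&mas, 0);		/* VMA before the range, if any */

prev and next feed the merge checks directly, so no extra tree walk is
needed to re-establish position after the unmap.
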
@@ -647,6 +625,127 @@ static void __insert_vm_struct(struct mm
 }
 
 /*
+ * vma_expand - Expand an existing VMA
+ *
+ * @mas: The maple state
+ * @vma: The vma to expand
+ * @start: The start of the vma
+ * @end: The exclusive end of the vma
+ * @pgoff: The page offset of vma
+ * @next: The vma following @vma, if any.
+ *
+ * Expand @vma to @start and @end.  Can expand off the start and end.  Will
+ * expand over @next if it's different from @vma and @end == @next->vm_end.
+ * Checking if the @vma can expand and merge with @next needs to be handled by
+ * the caller.
+ *
+ * Returns: 0 on success
+ */
+inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+		      unsigned long start, unsigned long end, pgoff_t pgoff,
+		      struct vm_area_struct *next)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct address_space *mapping = NULL;
+	struct rb_root_cached *root = NULL;
+	struct anon_vma *anon_vma = vma->anon_vma;
+	struct file *file = vma->vm_file;
+	bool remove_next = false;
+	bool anon_cloned = false;
+
+	if (next && (vma != next) && (end == next->vm_end)) {
+		remove_next = true;
+		if (next->anon_vma && !vma->anon_vma) {
+			int error;
+
+			vma->anon_vma = next->anon_vma;
+			error = anon_vma_clone(vma, next);
+			if (error)
+				return error;
+			anon_cloned = true;
+		}
+	}
+
+	/* Not merging but overwriting any part of next is not handled. */
+	VM_BUG_ON(!remove_next && next != vma && end > next->vm_start);
+	/* Only handles expanding */
+	VM_BUG_ON(vma->vm_start < start || vma->vm_end > end);
+
+	if (mas_preallocate(mas, vma, GFP_KERNEL))
+		goto nomem;
+
+	vma_adjust_trans_huge(vma, start, end, 0);
+
+	if (file) {
+		mapping = file->f_mapping;
+		root = &mapping->i_mmap;
+		uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+		i_mmap_lock_write(mapping);
+		flush_dcache_mmap_lock(mapping);
+		vma_interval_tree_remove(vma, root);
+	} else if (anon_vma) {
+		anon_vma_lock_write(anon_vma);
+		anon_vma_interval_tree_pre_update_vma(vma);
+	}
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
+	/* Note: mas must be pointing to the expanding VMA */
+	vma_mas_store(vma, mas);
+
+	if (file) {
+		vma_interval_tree_insert(vma, root);
+		flush_dcache_mmap_unlock(mapping);
+	}
+
+	/* Expanding over the next vma */
+	if (remove_next) {
+		/* Remove from mm linked list - also updates highest_vm_end */
+		__vma_unlink_list(mm, next);
+
+		/* Kill the cache */
+		vmacache_invalidate(mm);
+
+		if (file)
+			__remove_shared_vm_struct(next, file, mapping);
+
+	} else if (!next) {
+		mm->highest_vm_end = vm_end_gap(vma);
+	}
+
+	if (anon_vma) {
+		anon_vma_interval_tree_post_update_vma(vma);
+		anon_vma_unlock_write(anon_vma);
+	}
+
+	if (file) {
+		i_mmap_unlock_write(mapping);
+		uprobe_mmap(vma);
+	}
+
+	if (remove_next) {
+		if (file) {
+			uprobe_munmap(next, next->vm_start, next->vm_end);
+			fput(file);
+		}
+		if (next->anon_vma)
+			anon_vma_merge(vma, next);
+		mm->map_count--;
+		mpol_put(vma_policy(next));
+		vm_area_free(next);
+	}
+
+	validate_mm(mm);
+	return 0;
+
+nomem:
+	if (anon_cloned)
+		unlink_anon_vmas(vma);
+	return -ENOMEM;
+}
+
+/*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
  * The following helper function should be used when such adjustments
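A sketch of the intended calling convention, mirroring the mmap_region()
hunks further down (the expanded label and merge checks belong to the
caller, as the kernel-doc above requires):

	/* mas already points at [addr, end - 1]; caller verified mergeability */
	if (vma &&
	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
		/* vma now spans [merge_start, merge_end); next was freed if consumed */
		khugepaged_enter_vma(vma, vm_flags);
		goto expanded;
	}

	/* fall back to allocating a fresh VMA */
	mas.index = addr;
	mas.last = end - 1;

On failure vma_expand() backs out (unlinking any cloned anon_vma) before
returning, so the fallback path can proceed as if no merge had been
attempted.
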
@@ -1646,9 +1745,15 @@ unsigned long mmap_region(struct file *f
 		struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev, *merge;
-	int error;
+	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *prev, *next;
+	pgoff_t pglen = len >> PAGE_SHIFT;
 	unsigned long charged = 0;
+	unsigned long end = addr + len;
+	unsigned long merge_start = addr, merge_end = end;
+	pgoff_t vm_pgoff;
+	int error;
+	MA_STATE(mas, &mm->mm_mt, addr, end - 1);
 
 	/* Check against address space limit. */
 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
@@ -1658,16 +1763,17 @@ unsigned long mmap_region(struct file *f
 		 * MAP_FIXED may remove pages of mappings that intersects with
 		 * requested mapping. Account for the pages it would unmap.
 		 */
-		nr_pages = count_vma_pages_range(mm, addr, addr + len);
+		nr_pages = count_vma_pages_range(mm, addr, end);
 
 		if (!may_expand_vm(mm, vm_flags,
 					(len >> PAGE_SHIFT) - nr_pages))
 			return -ENOMEM;
 	}
 
-	/* Clear old maps, set up prev and uf */
-	if (munmap_vma_range(mm, addr, len, &prev, uf))
+	/* Unmap any existing mapping in the area */
+	if (do_munmap(mm, addr, len, uf))
 		return -ENOMEM;
+
 	/*
 	 * Private writable mapping: check memory availability
 	 */
@@ -1678,14 +1784,43 @@ unsigned long mmap_region(struct file *f
 		vm_flags |= VM_ACCOUNT;
 	}
 
-	/*
-	 * Can we just expand an old mapping?
-	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
-	if (vma)
-		goto out;
+	next = mas_next(&mas, ULONG_MAX);
+	prev = mas_prev(&mas, 0);
+	if (vm_flags & VM_SPECIAL)
+		goto cannot_expand;
+
+	/* Attempt to expand an old mapping */
+	/* Check next */
+	if (next && next->vm_start == end && !vma_policy(next) &&
+	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
+				 NULL_VM_UFFD_CTX, NULL)) {
+		merge_end = next->vm_end;
+		vma = next;
+		vm_pgoff = next->vm_pgoff - pglen;
+	}
+
+	/* Check prev */
+	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
+	    (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
+				       pgoff, vma->vm_userfaultfd_ctx, NULL) :
+		   can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
+				       NULL_VM_UFFD_CTX, NULL))) {
+		merge_start = prev->vm_start;
+		vma = prev;
+		vm_pgoff = prev->vm_pgoff;
+	}
+
+	/* Actually expand, if possible */
+	if (vma &&
+	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
+		khugepaged_enter_vma(vma, vm_flags);
+		goto expanded;
+	}
+
+	mas.index = addr;
+	mas.last = end - 1;
+cannot_expand:
 	/*
 	 * Determine the object being mapped and call the appropriate
 	 * specific mapper. the address has already been validated, but
@@ -1698,7 +1833,7 @@ unsigned long mmap_region(struct file *f
 	}
 
 	vma->vm_start = addr;
-	vma->vm_end = addr + len;
+	vma->vm_end = end;
 	vma->vm_flags = vm_flags;
 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
@@ -1719,28 +1854,30 @@ unsigned long mmap_region(struct file *f
 		 *
 		 * Answer: Yes, several device drivers can do it in their
 		 *         f_op->mmap method. -DaveM
-		 * Bug: If addr is changed, prev, rb_link, rb_parent should
-		 *      be updated for vma_link()
 		 */
 		WARN_ON_ONCE(addr != vma->vm_start);
 
 		addr = vma->vm_start;
+		mas_reset(&mas);
 
 		/* If vm_flags changed after call_mmap(), we should try merge vma again
 		 * as we may succeed this time.
 		 */
		if (unlikely(vm_flags != vma->vm_flags && prev)) {
-			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
-			if (merge) {
+			next = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
+				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
+			if (next) {
 				/* ->mmap() can change vma->vm_file and fput the original file. So
 				 * fput the vma->vm_file here or we would add an extra fput for file
 				 * and cause general protection fault ultimately.
 				 */
 				fput(vma->vm_file);
 				vm_area_free(vma);
-				vma = merge;
-				/* Update vm_flags to pick up the change. */
+				vma = prev;
+				/* Update vm_flags and possible addr to pick up the change. We don't
+				 * warn here if addr changed as the vma is not linked by vma_link().
+				 */
+				addr = vma->vm_start;
 				vm_flags = vma->vm_flags;
 				goto unmap_writable;
 			}
 		}
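For review, the vma_link() call removed below is open-coded in the next
two hunks.  Condensed (error handling simplified to a hypothetical fail
label), the replacement sequence is:

	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		goto fail;			/* nothing linked yet */

	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	vma_mas_store(vma, &mas);		/* maple tree */
	__vma_link_list(mm, vma, prev);		/* mm linked list */
	mm->map_count++;
	if (vma->vm_file) {
		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
		i_mmap_unlock_write(vma->vm_file->f_mapping);
	}

Preallocating the maple nodes first means the store itself cannot fail
once the i_mmap lock is held.
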
@@ -1770,7 +1907,7 @@ unsigned long mmap_region(struct file *f
 	 */
 	khugepaged_enter_vma(vma, vma->vm_flags);
 
-	if (vma_link(mm, vma, prev)) {
+	if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
 		error = -ENOMEM;
 		if (file)
 			goto unmap_and_free_vma;
@@ -1778,12 +1915,28 @@ unsigned long mmap_region(struct file *f
 		goto free_vma;
 	}
 
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+
+	vma_mas_store(vma, &mas);
+	__vma_link_list(mm, vma, prev);
+	mm->map_count++;
+	if (vma->vm_file) {
+		if (vma->vm_flags & VM_SHARED)
+			mapping_allow_writable(vma->vm_file->f_mapping);
+
+		flush_dcache_mmap_lock(vma->vm_file->f_mapping);
+		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
+		flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+	}
+
 	/* Once vma denies write, undo our temporary denial count */
 unmap_writable:
 	if (file && vm_flags & VM_SHARED)
 		mapping_unmap_writable(file->f_mapping);
 	file = vma->vm_file;
-out:
+expanded:
 	perf_event_mmap(vma);
 
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
@@ -1810,6 +1963,7 @@ out:
 
 	vma_set_page_prot(vma);
 
+	validate_mm(mm);
 	return addr;
 
 unmap_and_free_vma:
@@ -1826,6 +1980,7 @@ free_vma:
 unacct_error:
 	if (charged)
 		vm_unacct_memory(charged);
+	validate_mm(mm);
 	return error;
 }
 
@@ -2605,10 +2760,6 @@ int __do_munmap(struct mm_struct *mm, un
 	prev = vma->vm_prev;
 	/* we have start < vma->vm_end  */
 
-	/* if it doesn't overlap, we have nothing.. */
-	if (vma->vm_start >= end)
-		return 0;
-
 	/*
 	 * If we need to split any vma, do it now to save pain later.
 	 *
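The dropped overlap check relies on how @vma is now looked up in
__do_munmap(); the updated lookup is outside this hunk, so the following
is a sketch of the assumption rather than the patch's code, using the
maple tree API's range-query semantics:

	MA_STATE(mas, &mm->mm_mt, start, start);

	vma = mas_find(&mas, end - 1);	/* NULL or a VMA intersecting the range */
	if (!vma)
		return 0;

Because a range query over [start, end - 1] can only return an
intersecting entry, vma->vm_start >= end cannot hold at the point where
the removed check used to sit.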