From: Yang Shi
Subject: mm: khugepaged: introduce khugepaged_enter_vma() helper

khugepaged_enter_vma_merge() actually does the same thing as the
khugepaged_enter() call in shmem_mmap(), so consolidate them into one
helper and rename it to khugepaged_enter_vma().

Link: https://lkml.kernel.org/r/20220404200250.321455-8-shy828301@gmail.com
Signed-off-by: Yang Shi
Cc: "Kirill A. Shutemov"
Cc: Matthew Wilcox
Cc: Miaohe Lin
Cc: Rik van Riel
Cc: Song Liu
Cc: Theodore Ts'o
Cc: Vlastimil Babka
Cc: Zi Yan
Signed-off-by: Andrew Morton
---

 include/linux/khugepaged.h |    8 ++++----
 mm/khugepaged.c            |   26 +++++++++-----------------
 mm/mmap.c                  |    8 ++++----
 mm/shmem.c                 |   12 ++----------
 4 files changed, 19 insertions(+), 35 deletions(-)

--- a/include/linux/khugepaged.h~mm-khugepaged-introduce-khugepaged_enter_vma-helper
+++ a/include/linux/khugepaged.h
@@ -10,8 +10,8 @@ extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                       unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                 unsigned long vm_flags);
 extern void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm);
 extern void khugepaged_exit(struct mm_struct *mm);
 
@@ -49,8 +49,8 @@ static inline void khugepaged_enter(stru
                                     unsigned long vm_flags)
 {
 }
-static inline void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                                              unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+                                        unsigned long vm_flags)
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
--- a/mm/khugepaged.c~mm-khugepaged-introduce-khugepaged_enter_vma-helper
+++ a/mm/khugepaged.c
@@ -365,7 +365,7 @@ int hugepage_madvise(struct vm_area_stru
                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               khugepaged_enter_vma_merge(vma, *vm_flags);
+               khugepaged_enter_vma(vma, *vm_flags);
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
@@ -505,23 +505,15 @@ void __khugepaged_enter(struct mm_struct
                wake_up_interruptible(&khugepaged_wait);
 }
 
-void khugepaged_enter_vma_merge(struct vm_area_struct *vma,
-                               unsigned long vm_flags)
+void khugepaged_enter_vma(struct vm_area_struct *vma,
+                          unsigned long vm_flags)
 {
-       unsigned long hstart, hend;
-
-       /*
-        * khugepaged only supports read-only files for non-shmem files.
-        * khugepaged does not yet work on special mappings. And
-        * file-private shmem THP is not supported.
-        */
-       if (!hugepage_vma_check(vma, vm_flags))
-               return;
-
-       hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
-       hend = vma->vm_end & HPAGE_PMD_MASK;
-       if (hstart < hend)
-               khugepaged_enter(vma, vm_flags);
+       if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+           khugepaged_enabled() &&
+           (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
+            (vma->vm_end & HPAGE_PMD_MASK)))
+               if (hugepage_vma_check(vma, vm_flags))
+                       __khugepaged_enter(vma->vm_mm);
 }
 
 void __khugepaged_exit(struct mm_struct *mm)
--- a/mm/mmap.c~mm-khugepaged-introduce-khugepaged_enter_vma-helper
+++ a/mm/mmap.c
@@ -1223,7 +1223,7 @@ struct vm_area_struct *vma_merge(struct
                                         end, prev->vm_pgoff, NULL, prev);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(prev, vm_flags);
+               khugepaged_enter_vma(prev, vm_flags);
                return prev;
        }
 
@@ -1250,7 +1250,7 @@ struct vm_area_struct *vma_merge(struct
                }
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(area, vm_flags);
+               khugepaged_enter_vma(area, vm_flags);
                return area;
        }
 
@@ -2450,7 +2450,7 @@ int expand_upwards(struct vm_area_struct
                        }
                }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
@@ -2528,7 +2528,7 @@ int expand_downwards(struct vm_area_stru
                        }
                }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
--- a/mm/shmem.c~mm-khugepaged-introduce-khugepaged_enter_vma-helper
+++ a/mm/shmem.c
@@ -2240,11 +2240,7 @@ static int shmem_mmap(struct file *file,
 
        file_accessed(file);
        vma->vm_ops = &shmem_vm_ops;
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return 0;
 }
 
@@ -4143,11 +4139,7 @@ int shmem_zero_setup(struct vm_area_stru
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
 
-       if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
-                       ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-                       (vma->vm_end & HPAGE_PMD_MASK)) {
-               khugepaged_enter(vma, vma->vm_flags);
-       }
+       khugepaged_enter_vma(vma, vma->vm_flags);
        return 0;
 }
_
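
A note for readers of this patch: the one non-obvious piece of the consolidated helper is the PMD-alignment test, which rounds vm_start up and vm_end down to a huge page boundary and only registers the mm with khugepaged if at least one fully aligned, huge-page-sized range remains inside the VMA. The sketch below is not kernel code; it is a minimal userspace illustration of that arithmetic, assuming a 2 MiB HPAGE_PMD_SIZE (as on x86-64 with 4 KiB base pages) and using a hypothetical helper name, vma_spans_aligned_hugepage().

	/*
	 * Standalone illustration -- not kernel code.  It mimics the
	 * PMD-alignment test in khugepaged_enter_vma(): round vm_start up
	 * and vm_end down to a huge page boundary and check that a full
	 * huge page remains in between.  HPAGE_PMD_SIZE is assumed to be
	 * 2 MiB here; the helper name is hypothetical.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define HPAGE_PMD_SIZE	(2UL * 1024 * 1024)
	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

	static bool vma_spans_aligned_hugepage(unsigned long vm_start,
					       unsigned long vm_end)
	{
		/* round vm_start up, vm_end down, to a PMD boundary */
		unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		unsigned long hend = vm_end & HPAGE_PMD_MASK;

		return hstart < hend;
	}

	int main(void)
	{
		/* 4 MiB VMA starting 1 MiB into a huge page region: covers [2M, 4M) */
		printf("%d\n", vma_spans_aligned_hugepage(0x100000, 0x500000));	/* 1 */
		/* 1 MiB VMA that never covers a fully aligned 2 MiB region */
		printf("%d\n", vma_spans_aligned_hugepage(0x100000, 0x200000));	/* 0 */
		return 0;
	}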