From: Anshuman Khandual
Subject: mm/debug_vm_pgtable: drop protection_map[] usage

Patch series "mm: protection_map[] cleanups".

This patch (of 2):

Although protection_map[] contains the platform-defined page protection
map for a given vm_flags combination, vm_get_page_prot() is the right
interface to use.  This will also reduce the dependency on
protection_map[], which is going to be dropped completely later on.

Link: https://lkml.kernel.org/r/20220404031840.588321-1-anshuman.khandual@arm.com
Link: https://lkml.kernel.org/r/20220404031840.588321-2-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Reviewed-by: Christoph Hellwig
Signed-off-by: Andrew Morton
---

 mm/debug_vm_pgtable.c |   31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)

--- a/mm/debug_vm_pgtable.c~mm-debug_vm_pgtable-drop-protection_map-usage
+++ a/mm/debug_vm_pgtable.c
@@ -93,7 +93,7 @@ struct pgtable_debug_args {
 
 static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
 {
-	pgprot_t prot = protection_map[idx];
+	pgprot_t prot = vm_get_page_prot(idx);
 	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
 	unsigned long val = idx, *ptr = &val;
 
@@ -101,7 +101,7 @@ static void __init pte_basic_tests(struc
 
 	/*
 	 * This test needs to be executed after the given page table entry
-	 * is created with pfn_pte() to make sure that protection_map[idx]
+	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
 	 * does not have the dirty bit enabled from the beginning. This is
 	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
 	 * dirty bit being set.
@@ -190,7 +190,7 @@ static void __init pte_savedwrite_tests(
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
 {
-	pgprot_t prot = protection_map[idx];
+	pgprot_t prot = vm_get_page_prot(idx);
 	unsigned long val = idx, *ptr = &val;
 	pmd_t pmd;
 
@@ -202,7 +202,7 @@ static void __init pmd_basic_tests(struc
 
	/*
	 * This test needs to be executed after the given page table entry
-	 * is created with pfn_pmd() to make sure that protection_map[idx]
+	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
 	 * does not have the dirty bit enabled from the beginning. This is
 	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
 	 * dirty bit being set.
@@ -325,7 +325,7 @@ static void __init pmd_savedwrite_tests(
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
 {
-	pgprot_t prot = protection_map[idx];
+	pgprot_t prot = vm_get_page_prot(idx);
 	unsigned long val = idx, *ptr = &val;
 	pud_t pud;
 
@@ -337,7 +337,7 @@ static void __init pud_basic_tests(struc
 
 	/*
 	 * This test needs to be executed after the given page table entry
-	 * is created with pfn_pud() to make sure that protection_map[idx]
+	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
 	 * does not have the dirty bit enabled from the beginning. This is
 	 * important for platforms like arm64 where (!PTE_RDONLY) indicate
 	 * dirty bit being set.
@@ -1119,14 +1119,14 @@ static int __init init_args(struct pgtab
 	/*
 	 * Initialize the debugging data.
 	 *
-	 * protection_map[0] (or even protection_map[8]) will help create
-	 * page table entries with PROT_NONE permission as required for
-	 * pxx_protnone_tests().
+	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
+	 * will help create page table entries with PROT_NONE permission as
+	 * required for pxx_protnone_tests().
 	 */
 	memset(args, 0, sizeof(*args));
 	args->vaddr = get_random_vaddr();
 	args->page_prot = vm_get_page_prot(VMFLAGS);
-	args->page_prot_none = protection_map[0];
+	args->page_prot_none = vm_get_page_prot(VM_NONE);
 	args->is_contiguous_page = false;
 	args->pud_pfn = ULONG_MAX;
 	args->pmd_pfn = ULONG_MAX;
@@ -1261,12 +1261,19 @@ static int __init debug_vm_pgtable(void)
 		return ret;
 
 	/*
-	 * Iterate over the protection_map[] to make sure that all
+	 * Iterate over each possible vm_flags to make sure that all
 	 * the basic page table transformation validations just hold
 	 * true irrespective of the starting protection value for a
 	 * given page table entry.
+	 *
+	 * Protection based vm_flags combinations are always linear
+	 * and increasing i.e. starting from VM_NONE and going up to
+	 * (VM_SHARED | READ | WRITE | EXEC).
 	 */
-	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
+#define VM_FLAGS_START	(VM_NONE)
+#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)
+
+	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
 		pte_basic_tests(&args, idx);
 		pmd_basic_tests(&args, idx);
 		pud_basic_tests(&args, idx);
_
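
For reference, the reason vm_get_page_prot(idx) can stand in directly for
protection_map[idx] in these tests is that the generic helper in mm/mmap.c
indexes protection_map[] with the low four vm_flags bits.  A simplified
sketch of that helper, omitting the arch_vm_get_page_prot() and
arch_filter_pgprot() hooks that some platforms layer on top:

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Only VM_READ, VM_WRITE, VM_EXEC and VM_SHARED pick the slot */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}

Since VM_READ, VM_WRITE, VM_EXEC and VM_SHARED occupy bits 0-3, the new
VM_FLAGS_START..VM_FLAGS_END loop (0 through 15) still exercises every
protection_map[] slot exactly once.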