mm/mempolicy: convert queue_pages_pmd() to queue_folios_pmd()
Author:    Vishal Moola (Oracle) <vishal.moola@gmail.com>
Date:      Mon, 30 Jan 2023 20:18:29 +0000 (12:18 -0800)
Committer: Andrew Morton <akpm@linux-foundation.org>
Date:      Mon, 13 Feb 2023 23:54:30 +0000 (15:54 -0800)
The function now operates on a folio instead of the page associated with a
pmd.

This change is in preparation for the conversion of queue_pages_required()
to queue_folio_required() and migrate_page_add() to migrate_folio_add().

Link: https://lkml.kernel.org/r/20230130201833.27042-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: "Yin, Fengwei" <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mempolicy.c

index 7686f40c97500edc40bc36e1114dbca0332b6ad0..fc754dbcbbcd625e48e0332ec52211229749814a 100644 (file)
@@ -442,21 +442,21 @@ static inline bool queue_pages_required(struct page *page,
 }
 
 /*
- * queue_pages_pmd() has three possible return values:
- * 0 - pages are placed on the right node or queued successfully, or
+ * queue_folios_pmd() has three possible return values:
+ * 0 - folios are placed on the right node or queued successfully, or
  *     special page is met, i.e. huge zero page.
- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
  *     specified.
  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
- *        existing page was already on a node that does not follow the
+ *        existing folio was already on a node that does not follow the
  *        policy.
  */
-static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
        __releases(ptl)
 {
        int ret = 0;
-       struct page *page;
+       struct folio *folio;
        struct queue_pages *qp = walk->private;
        unsigned long flags;
 
@@ -464,19 +464,19 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                ret = -EIO;
                goto unlock;
        }
-       page = pmd_page(*pmd);
-       if (is_huge_zero_page(page)) {
+       folio = pfn_folio(pmd_pfn(*pmd));
+       if (is_huge_zero_page(&folio->page)) {
                walk->action = ACTION_CONTINUE;
                goto unlock;
        }
-       if (!queue_pages_required(page, qp))
+       if (!queue_pages_required(&folio->page, qp))
                goto unlock;
 
        flags = qp->flags;
-       /* go to thp migration */
+       /* go to folio migration */
        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
                if (!vma_migratable(walk->vma) ||
-                   migrate_page_add(page, qp->pagelist, flags)) {
+                   migrate_page_add(&folio->page, qp->pagelist, flags)) {
                        ret = 1;
                        goto unlock;
                }
@@ -512,7 +512,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl)
-               return queue_pages_pmd(pmd, ptl, addr, end, walk);
+               return queue_folios_pmd(pmd, ptl, addr, end, walk);
 
        if (pmd_trans_unstable(pmd))
                return 0;