mm/khugepaged: convert is_refcount_suitable() to use folios
authorVishal Moola (Oracle) <vishal.moola@gmail.com>
Fri, 20 Oct 2023 18:33:29 +0000 (11:33 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Wed, 25 Oct 2023 23:47:14 +0000 (16:47 -0700)
Both callers of is_refcount_suitable() have been converted to use
folios, so convert it to take in a folio. Both callers only operate on
head pages of folios so mapcount/refcount conversions here are trivial.

Removes 3 calls to compound_head(), and removes 315 bytes of kernel text.

Link: https://lkml.kernel.org/r/20231020183331.10770-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/khugepaged.c

index 6c4b5af4337144ee1a90e9988448a6a027d07ea1..9efd8ff68f063a84d749ad534ca79e1bd45ac3cc 100644 (file)
@@ -524,15 +524,15 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
        }
 }
 
-static bool is_refcount_suitable(struct page *page)
+static bool is_refcount_suitable(struct folio *folio)
 {
        int expected_refcount;
 
-       expected_refcount = total_mapcount(page);
-       if (PageSwapCache(page))
-               expected_refcount += compound_nr(page);
+       expected_refcount = folio_mapcount(folio);
+       if (folio_test_swapcache(folio))
+               expected_refcount += folio_nr_pages(folio);
 
-       return page_count(page) == expected_refcount;
+       return folio_ref_count(folio) == expected_refcount;
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
@@ -625,7 +625,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                 * but not from this process. The other process cannot write to
                 * the page, only trigger CoW.
                 */
-               if (!is_refcount_suitable(&folio->page)) {
+               if (!is_refcount_suitable(folio)) {
                        folio_unlock(folio);
                        result = SCAN_PAGE_COUNT;
                        goto out;
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
                 * has excessive GUP pins (i.e. 512).  Anyway the same check
                 * will be done again later the risk seems low.
                 */
-               if (!is_refcount_suitable(&folio->page)) {
+               if (!is_refcount_suitable(folio)) {
                        result = SCAN_PAGE_COUNT;
                        goto out_unmap;
                }