filemap: Add filemap_map_folio_range()
filemap_map_folio_range() maps a partial or full folio. Compared to the
original filemap_map_pages(), it updates the refcount once per folio
instead of once per page, giving a minor performance improvement for
large folios.
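
As an aside (not part of the patch), a minimal userspace C sketch of the
batching idea: instead of one atomic reference-count operation per mapped
page, the pages are counted locally and a single atomic add is issued per
folio. The atomic_int below merely stands in for the folio refcount, and the
names map_pages_per_page_ref()/map_pages_batched_ref() are made up for
illustration.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int folio_refcount = 1;	/* stand-in for the folio refcount */

	/* Old scheme: one atomic op per page, like folio_ref_inc() per iteration. */
	static void map_pages_per_page_ref(unsigned int nr_pages)
	{
		for (unsigned int i = 0; i < nr_pages; i++)
			atomic_fetch_add(&folio_refcount, 1);
	}

	/* New scheme: count locally, then one atomic op, like folio_ref_add(). */
	static void map_pages_batched_ref(unsigned int nr_pages)
	{
		unsigned int ref_count = 0;

		for (unsigned int i = 0; i < nr_pages; i++)
			ref_count++;		/* this page got mapped */
		atomic_fetch_add(&folio_refcount, ref_count);
	}

	int main(void)
	{
		map_pages_per_page_ref(16);	/* 16 atomic RMW operations */
		map_pages_batched_ref(16);	/* 1 atomic RMW operation */
		printf("refcount: %d\n", atomic_load(&folio_refcount));
		return 0;
	}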

With a will-it-scale.page_fault3-like app (the file-write fault test
changed to a read fault test; there is an attempt to upstream it to
will-it-scale at [1]), this gives a 2% performance gain on a 48C/96T
Cascade Lake test box with 96 processes running against xfs.

[1]: antonblanchard/will-it-scale#37

Signed-off-by: Yin Fengwei <[email protected]>
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
fyin1 authored and intel-lab-lkp committed Feb 7, 2023
1 parent 49a8133 commit c03c696
98 changes: 54 additions & 44 deletions mm/filemap.c
@@ -2200,16 +2200,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
}
EXPORT_SYMBOL(filemap_get_folios);

static inline
bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
{
	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
		return false;
	if (index >= max)
		return false;
	return index < folio->index + folio_nr_pages(folio) - 1;
}

/**
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:	The address_space to search
@@ -3351,6 +3341,53 @@ static inline struct folio *next_map_page(struct address_space *mapping,
				  mapping, xas, end_pgoff);
}

/*
 * Map the page range [start_page, start_page + nr_pages) of folio.
 * start_page is obtained from start via folio_page(folio, start).
 */
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
			struct folio *folio, unsigned long start,
			unsigned long addr, unsigned int nr_pages)
{
	vm_fault_t ret = 0;
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct page *page = folio_page(folio, start);
	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
	unsigned int ref_count = 0, count = 0;

	do {
		if (PageHWPoison(page))
			continue;

		if (mmap_miss > 0)
			mmap_miss--;

		/*
		 * NOTE: If there're PTE markers, we'll leave them to be
		 * handled in the specific fault path, and it'll prohibit the
		 * fault-around logic.
		 */
		if (!pte_none(*vmf->pte))
			continue;

		if (vmf->address == addr)
			ret = VM_FAULT_NOPAGE;

		ref_count++;
		do_set_pte(vmf, page, addr);
		update_mmu_cache(vma, addr, vmf->pte);
	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);

	/* Restore the vmf->pte */
	vmf->pte -= nr_pages;

	folio_ref_add(folio, ref_count);
	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);

	return ret;
}

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
@@ -3361,9 +3398,9 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
	unsigned long addr;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct folio *folio;
	struct page *page;
	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
	vm_fault_t ret = 0;
	int nr_pages = 0;

	rcu_read_lock();
	folio = first_map_page(mapping, &xas, end_pgoff);
@@ -3378,45 +3415,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	do {
again:
		page = folio_file_page(folio, xas.xa_index);
		if (PageHWPoison(page))
			goto unlock;

		if (mmap_miss > 0)
			mmap_miss--;
		unsigned long end;

		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		end = folio->index + folio_nr_pages(folio) - 1;
		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

		/*
		 * NOTE: If there're PTE markers, we'll leave them to be
		 * handled in the specific fault path, and it'll prohibit the
		 * fault-around logic.
		 */
		if (!pte_none(*vmf->pte))
			goto unlock;

		/* We're about to handle the fault */
		if (vmf->address == addr)
			ret = VM_FAULT_NOPAGE;
		ret |= filemap_map_folio_range(vmf, folio,
				xas.xa_index - folio->index, addr, nr_pages);
		xas.xa_index += nr_pages;

		do_set_pte(vmf, page, addr);
		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, addr, vmf->pte);
		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
			xas.xa_index++;
			folio_ref_inc(folio);
			goto again;
		}
		folio_unlock(folio);
		continue;
unlock:
		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
			xas.xa_index++;
			goto again;
		}
		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);

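For reference, a standalone C sketch (not kernel code) of how the reworked
filemap_map_pages() loop sizes each filemap_map_folio_range() call: the batch
covers the pages of the current folio that still fall inside the fault-around
window ending at end_pgoff. The helper name folio_batch_size() and the sample
numbers are made up for illustration.

	#include <stdio.h>

	typedef unsigned long pgoff_t;

	/*
	 * Mirror of the nr_pages computation in the reworked loop:
	 *   end = folio->index + folio_nr_pages(folio) - 1;
	 *   nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
	 */
	static unsigned int folio_batch_size(pgoff_t folio_index,
					     unsigned long folio_nr_pages,
					     pgoff_t xa_index, pgoff_t end_pgoff)
	{
		pgoff_t end = folio_index + folio_nr_pages - 1;	/* last page of folio */

		if (end > end_pgoff)		/* clamp to the fault-around window */
			end = end_pgoff;
		return end - xa_index + 1;
	}

	int main(void)
	{
		/* A 16-page folio at index 32; the window ends at pgoff 40. */
		printf("%u\n", folio_batch_size(32, 16, 32, 40));	/* 9 pages */
		/* Same folio, window ends at pgoff 63: the whole folio is mapped. */
		printf("%u\n", folio_batch_size(32, 16, 32, 63));	/* 16 pages */
		return 0;
	}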