mm: add do_set_pte_range()
do_set_pte_range() allows setting up page table entries for a
specific range of pages. It calls folio_add_file_rmap_range() to take
advantage of the batched rmap update for large folios.

Signed-off-by: Yin Fengwei <[email protected]>
fyin1 authored and intel-lab-lkp committed Feb 6, 2023
1 parent 772deb0 commit f4cb29e
Showing 3 changed files with 49 additions and 21 deletions.
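
For illustration, a caller that maps several consecutive pages of a
file-backed folio could use the new helper roughly as sketched below.
The sketch is not part of this commit: the wrapper name
map_folio_pages_sketch() is hypothetical, and it assumes the caller
already holds the PTE lock with vmf->pte pointing at the page-table
entry for addr, as the filemap fault-around path does.

/*
 * Illustrative sketch only, not from this commit: map "nr" consecutive
 * pages of a file-backed folio, starting at page index "start" within
 * the folio, at user virtual address "addr".
 *
 * Assumes the PTE lock is held and vmf->pte points at the PTE for
 * "addr".  do_set_pte_range() performs one folio_add_file_rmap_range()
 * and one add_mm_counter() call for the whole range, then writes each
 * PTE with set_pte_at() and calls update_mmu_cache() per page in its
 * internal loop.
 */
static void map_folio_pages_sketch(struct vm_fault *vmf, struct folio *folio,
				   unsigned long addr, unsigned long start,
				   unsigned int nr)
{
	do_set_pte_range(vmf, folio, addr, vmf->pte, start, nr);
}

Compared with calling do_set_pte() once per page, this batches the rmap
and mm counter updates: one folio_add_file_rmap_range() and one
add_mm_counter() call per mapped range, rather than one
page_add_file_rmap() and one inc_mm_counter() call per page.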
3 changes: 3 additions & 0 deletions include/linux/mm.h
@@ -1169,6 +1169,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
unsigned long addr, pte_t *pte,
unsigned long start, unsigned int nr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
1 change: 0 additions & 1 deletion mm/filemap.c
@@ -3376,7 +3376,6 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,

ref_count++;
do_set_pte(vmf, page, addr);
update_mmu_cache(vma, addr, vmf->pte);
} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);

/* Restore the vmf->pte */
66 changes: 46 additions & 20 deletions mm/memory.c
@@ -4257,36 +4257,65 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
}
#endif

void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
void do_set_pte_range(struct vm_fault *vmf, struct folio *folio,
unsigned long addr, pte_t *pte,
unsigned long start, unsigned int nr)
{
struct vm_area_struct *vma = vmf->vma;
bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte);
bool write = vmf->flags & FAULT_FLAG_WRITE;
bool cow = write && !(vma->vm_flags & VM_SHARED);
bool prefault = vmf->address != addr;
struct page *page = folio_page(folio, start);
pte_t entry;

flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
if (!cow) {
folio_add_file_rmap_range(folio, start, nr, vma, false);
add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
} else {
/*
* rmap code is not ready to handle COW with anonymous
* large folio yet. Capture and warn if large folio
* is given.
*/
VM_WARN_ON_FOLIO(folio_test_large(folio), folio);
}

if (prefault && arch_wants_old_prefaulted_pte())
entry = pte_mkold(entry);
else
entry = pte_sw_mkyoung(entry);
do {
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);

if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (unlikely(uffd_wp))
entry = pte_mkuffd_wp(entry);
/* copy-on-write page */
if (write && !(vma->vm_flags & VM_SHARED)) {
if (prefault && arch_wants_old_prefaulted_pte())
entry = pte_mkold(entry);
else
entry = pte_sw_mkyoung(entry);

if (write)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (unlikely(uffd_wp))
entry = pte_mkuffd_wp(entry);
set_pte_at(vma->vm_mm, addr, pte, entry);

/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, addr, pte);
} while (pte++, page++, addr += PAGE_SIZE, --nr > 0);
}

void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
{
struct folio *folio = page_folio(page);
struct vm_area_struct *vma = vmf->vma;
bool cow = (vmf->flags & FAULT_FLAG_WRITE) &&
!(vma->vm_flags & VM_SHARED);

if (cow) {
inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, addr);
lru_cache_add_inactive_or_unevictable(page, vma);
} else {
inc_mm_counter(vma->vm_mm, mm_counter_file(page));
page_add_file_rmap(page, vma, false);
}
set_pte_at(vma->vm_mm, addr, vmf->pte, entry);

do_set_pte_range(vmf, folio, addr, vmf->pte,
folio_page_idx(folio, page), 1);
}

static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4361,9 +4390,6 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
if (likely(!vmf_pte_changed(vmf))) {
do_set_pte(vmf, page, vmf->address);

/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, vmf->address, vmf->pte);

ret = 0;
} else {
update_mmu_tlb(vma, vmf->address, vmf->pte);
