xtensa: implement the new page table range API
Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Acked-by: Mike Rapoport (IBM) <[email protected]>
Cc: Max Filippov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Matthew Wilcox (Oracle) authored and akpm00 committed Aug 22, 2023
1 parent b4b6c10 commit 103e9aa
Showing 3 changed files with 63 additions and 47 deletions.
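
For orientation: PFN_PTE_SHIFT is what lets generic mm code construct the PTE for each successive page of a folio without an arch callback. A simplified sketch of the generic set_ptes() helper this series adds to include/linux/pgtable.h (the real helper also enters lazy-MMU mode and runs page-table checks):

	/* Simplified sketch of the generic set_ptes(); lazy-MMU and
	 * page-table-check hooks are omitted here.
	 */
	static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte, unsigned int nr)
	{
		for (;;) {
			set_pte(ptep, pte);
			if (--nr == 0)
				break;
			ptep++;
			/* step the PFN to the next page of the folio */
			pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
		}
	}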
9 changes: 8 additions & 1 deletion arch/xtensa/include/asm/cacheflush.h
@@ -119,8 +119,14 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_vmap(start,end)	flush_cache_all()
 #define flush_cache_vunmap(start,end)	flush_cache_all()
 
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
-void flush_dcache_page(struct page *);
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 void local_flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
@@ -156,6 +162,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 
 /* This is not required, see Documentation/core-api/cachetlb.rst */
 #define	flush_icache_page(vma,page)		do { } while (0)
+#define	flush_icache_pages(vma, page, nr)	do { } while (0)
 
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
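
The inline wrapper above keeps every existing flush_dcache_page() caller compiling unchanged while making the folio the unit of flushing. A minimal illustration (hypothetical caller, not part of this commit):

	/* Hypothetical caller: flushing any constituent page of a folio now
	 * flushes the whole folio, because the wrapper resolves the folio
	 * before flushing.
	 */
	static void example_flush(struct folio *folio)
	{
		flush_dcache_page(folio_page(folio, 0));	/* old entry point */
		flush_dcache_folio(folio);			/* equivalent direct call */
	}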
18 changes: 8 additions & 10 deletions arch/xtensa/include/asm/pgtable.h
@@ -274,6 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
  * and a page entry and page directory to the page they refer to.
  */
 
+#define PFN_PTE_SHIFT		PAGE_SHIFT
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pte_same(a,b)		(pte_val(a) == pte_val(b))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
@@ -301,15 +302,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
 
 struct mm_struct;
 
-static inline void
-set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
-{
-	update_pte(ptep, pteval);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
+static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	update_pte(ptep, pteval);
+	update_pte(ptep, pte);
 }
 
 static inline void
@@ -407,8 +402,11 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 
 #else
 
-extern void update_mmu_cache(struct vm_area_struct * vma,
-		unsigned long address, pte_t *ptep);
+struct vm_fault;
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, address, ptep) \
+	update_mmu_cache_range(NULL, vma, address, ptep, 1)
 
 typedef pte_t *pte_addr_t;
 
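
With these definitions, generic code can write a batch of PTEs and then make a single ranged call into the arch, while old update_mmu_cache() callers keep working through the macro. An abridged sketch of a caller, modelled on set_pte_range() in mm/memory.c from this series (rmap and accounting details omitted):

	/* Abridged sketch of a generic fault-path caller: install nr PTEs
	 * for consecutive pages, then let the arch refresh its caches once.
	 */
	static void sketch_set_pte_range(struct vm_fault *vmf, struct page *page,
			unsigned int nr, unsigned long addr)
	{
		pte_t entry = mk_pte(page, vmf->vma->vm_page_prot);

		set_ptes(vmf->vma->vm_mm, addr, vmf->pte, entry, nr);
		update_mmu_cache_range(vmf, vmf->vma, addr, vmf->pte, nr);
	}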
83 changes: 47 additions & 36 deletions arch/xtensa/mm/cache.c
@@ -121,9 +121,9 @@ EXPORT_SYMBOL(copy_user_highpage);
  *
  */
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping_file(page);
+	struct address_space *mapping = folio_flush_mapping(folio);
 
 	/*
 	 * If we have a mapping but the page is not mapped to user-space
@@ -132,14 +132,14 @@ void flush_dcache_page(struct page *page)
 	 */
 
 	if (mapping && !mapping_mapped(mapping)) {
-		if (!test_bit(PG_arch_1, &page->flags))
-			set_bit(PG_arch_1, &page->flags);
+		if (!test_bit(PG_arch_1, &folio->flags))
+			set_bit(PG_arch_1, &folio->flags);
 		return;
 
 	} else {
 
-		unsigned long phys = page_to_phys(page);
-		unsigned long temp = page->index << PAGE_SHIFT;
+		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
+		unsigned long temp = folio_pos(folio);
+		unsigned int i, nr = folio_nr_pages(folio);
 		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
 		unsigned long virt;
 
@@ -154,22 +154,26 @@ void flush_dcache_page(struct page *page)
 			return;
 
 		preempt_disable();
-		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(virt, phys);
+		for (i = 0; i < nr; i++) {
+			virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(virt, phys);
 
-		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
+			virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
 
-		if (alias)
-			__flush_invalidate_dcache_page_alias(virt, phys);
+			if (alias)
+				__flush_invalidate_dcache_page_alias(virt, phys);
 
-		if (mapping)
-			__invalidate_icache_page_alias(virt, phys);
+			if (mapping)
+				__invalidate_icache_page_alias(virt, phys);
+			phys += PAGE_SIZE;
+			temp += PAGE_SIZE;
+		}
 		preempt_enable();
 	}
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
 
 /*
  * For now, flush the whole cache. FIXME??
@@ -207,45 +211,52 @@ EXPORT_SYMBOL(local_flush_cache_page);
 
 #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
-void
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr)
 {
 	unsigned long pfn = pte_pfn(*ptep);
-	struct page *page;
+	struct folio *folio;
+	unsigned int i;
 
 	if (!pfn_valid(pfn))
 		return;
 
-	page = pfn_to_page(pfn);
+	folio = page_folio(pfn_to_page(pfn));
 
-	/* Invalidate old entry in TLBs */
-
-	flush_tlb_page(vma, addr);
+	/* Invalidate old entries in TLBs */
+	for (i = 0; i < nr; i++)
+		flush_tlb_page(vma, addr + i * PAGE_SIZE);
+	nr = folio_nr_pages(folio);
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
-	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-		unsigned long phys = page_to_phys(page);
+	if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
+		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
 		unsigned long tmp;
 
 		preempt_disable();
-		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(tmp, phys);
-		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(tmp, phys);
-		__invalidate_icache_page_alias(tmp, phys);
+		for (i = 0; i < nr; i++) {
+			tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(tmp, phys);
+			tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
+			__flush_invalidate_dcache_page_alias(tmp, phys);
+			__invalidate_icache_page_alias(tmp, phys);
+			phys += PAGE_SIZE;
+		}
 		preempt_enable();
 
-		clear_bit(PG_arch_1, &page->flags);
+		clear_bit(PG_arch_1, &folio->flags);
	}
 #else
-	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
+	if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
 	    && (vma->vm_flags & VM_EXEC) != 0) {
-		unsigned long paddr = (unsigned long)kmap_atomic(page);
-		__flush_dcache_page(paddr);
-		__invalidate_icache_page(paddr);
-		set_bit(PG_arch_1, &page->flags);
-		kunmap_atomic((void *)paddr);
+		for (i = 0; i < nr; i++) {
+			void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
+			__flush_dcache_page((unsigned long)paddr);
+			__invalidate_icache_page((unsigned long)paddr);
+			kunmap_local(paddr);
+		}
+		set_bit(PG_arch_1, &folio->flags);
 	}
 #endif
 }
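
A note on the PG_arch_1 protocol visible above: flush_dcache_folio() defers work by setting PG_arch_1 when the folio has no user mappings yet, and update_mmu_cache_range() completes the flush (clearing the bit on the aliasing path) once the folio is actually mapped. The alias trick both per-page loops rely on condenses to the following sketch (existing xtensa constants and helpers; not new code in this commit):

	/* Flush one page through a temporary alias: the window inside
	 * TLBTEMP_BASE_1 is chosen so its aliasing bits (phys &
	 * DCACHE_ALIAS_MASK) match those of the mapping being maintained,
	 * guaranteeing the same cache lines are hit.
	 */
	static void flush_one_alias(unsigned long phys)
	{
		unsigned long virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page_alias(virt, phys);
	}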
