2 changes: 1 addition & 1 deletion Documentation/features/vm/TLB/arch-support.txt
@@ -21,7 +21,7 @@
     |    openrisc: |  ..  |
     |      parisc: | TODO |
     |     powerpc: | TODO |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
1 change: 1 addition & 0 deletions arch/riscv/Kconfig
@@ -62,6 +62,7 @@ config RISCV
         select ARCH_USE_MEMTEST
         select ARCH_USE_QUEUED_RWLOCKS
         select ARCH_USES_CFI_TRAPS if CFI_CLANG
+        select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
         select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
         select ARCH_WANT_FRAME_POINTERS
         select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
15 changes: 15 additions & 0 deletions arch/riscv/include/asm/tlbbatch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#ifndef _ASM_RISCV_TLBBATCH_H
+#define _ASM_RISCV_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+        struct cpumask cpumask;
+};
+
+#endif /* _ASM_RISCV_TLBBATCH_H */
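This struct is the entire per-batch state on riscv: a CPU mask that accumulates, across all deferred unmaps, the set of harts that may still cache stale translations. A toy illustration of that accumulation (plain userspace C, with an 8-bit mask standing in for struct cpumask; the toy_* names are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Toy stand-in for struct cpumask: one bit per CPU. */
    struct toy_batch {
            uint8_t cpumask;
    };

    /* Same shape as arch_tlbbatch_add_pending(): OR in the mm's CPUs. */
    static void toy_add_pending(struct toy_batch *batch, uint8_t mm_mask)
    {
            batch->cpumask |= mm_mask;
    }

    int main(void)
    {
            struct toy_batch batch = { 0 };

            toy_add_pending(&batch, 0x3);   /* an mm that ran on CPUs 0-1 */
            toy_add_pending(&batch, 0x6);   /* another mm, CPUs 1-2 */
            assert(batch.cpumask == 0x7);   /* one flush covers CPUs 0-2 */
            return 0;
    }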
8 changes: 8 additions & 0 deletions arch/riscv/include/asm/tlbflush.h
@@ -64,6 +64,14 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end);
 #endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                               struct mm_struct *mm,
+                               unsigned long uaddr);
+void arch_flush_tlb_batched_pending(struct mm_struct *mm);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
 #else /* CONFIG_SMP && CONFIG_MMU */
 
 #define flush_tlb_all() local_flush_tlb_all()
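These four declarations are the arch half of the generic batched-unmap machinery enabled by ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH. Roughly, the reclaim path in mm/rmap.c asks arch_tlbbatch_should_defer() whether a per-page flush can be skipped, records each skipped flush with arch_tlbbatch_add_pending(), and issues a single arch_tlbbatch_flush() before the reclaimed pages are reused; arch_flush_tlb_batched_pending() is the fallback generic code uses when it must synchronize with flushes that are still pending. A simplified sketch of that calling order (kernel-style pseudocode, not the literal mm/rmap.c source; the *_sketch helpers are hypothetical):

    /* Sketch: how generic mm code drives the hooks above. */
    static void reclaim_unmap_sketch(struct mm_struct *mm,
                                     unsigned long *uaddrs, int n)
    {
            struct arch_tlbflush_unmap_batch batch = { };
            int i;

            for (i = 0; i < n; i++) {
                    clear_pte_sketch(mm, uaddrs[i]);        /* hypothetical */

                    if (arch_tlbbatch_should_defer(mm))
                            /* riscv: always true, just record the CPUs */
                            arch_tlbbatch_add_pending(&batch, mm, uaddrs[i]);
                    else
                            flush_one_page_sketch(mm, uaddrs[i]);
            }

            /* One broadcast for the whole batch instead of n page flushes. */
            arch_tlbbatch_flush(&batch);
    }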
97 changes: 77 additions & 20 deletions arch/riscv/mm/tlbflush.c
@@ -3,6 +3,7 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 #include <asm/cpufeature.h>
@@ -106,29 +107,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
         local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
-                              unsigned long size, unsigned long stride)
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+                              unsigned long start, unsigned long size,
+                              unsigned long stride)
 {
         struct flush_tlb_range_data ftd;
-        const struct cpumask *cmask;
-        unsigned long asid = FLUSH_TLB_NO_ASID;
         bool broadcast;
 
-        if (mm) {
-                unsigned int cpuid;
+        if (cpumask_empty(cmask))
+                return;
 
-                cmask = mm_cpumask(mm);
-                if (cpumask_empty(cmask))
-                        return;
+        if (cmask != cpu_online_mask) {
+                unsigned int cpuid;
 
                 cpuid = get_cpu();
                 /* check if the tlbflush needs to be sent to other CPUs */
                 broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
-                if (static_branch_unlikely(&use_asid_allocator))
-                        asid = atomic_long_read(&mm->context.id) & asid_mask;
         } else {
-                cmask = cpu_online_mask;
                 broadcast = true;
         }
 
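The point of the refactor above is to decouple __flush_tlb_range() from struct mm_struct: callers now pass the CPU mask and ASID explicitly, so paths that have no mm at hand (the kernel range flush and the batched flush below) can reuse the same helper. For mm-based callers the two forms are equivalent:

    /* Before: the helper derived both values from the mm itself. */
    __flush_tlb_range(mm, start, size, stride);

    /* After: the caller supplies them, via the new get_mm_asid() helper. */
    __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm), start, size, stride);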
@@ -148,42 +143,104 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                 local_flush_tlb_range_asid(start, size, stride, asid);
         }
 
-        if (mm)
+        if (cmask != cpu_online_mask)
                 put_cpu();
 }
 
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+        return static_branch_unlikely(&use_asid_allocator) ?
+                atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+        __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+                          0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
                         unsigned long start, unsigned long end,
                         unsigned int page_size)
 {
-        __flush_tlb_range(mm, start, end - start, page_size);
+        __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+                          start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-        __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                          addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                      unsigned long end)
 {
-        __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+        unsigned long stride_size;
+
+        if (!is_vm_hugetlb_page(vma)) {
+                stride_size = PAGE_SIZE;
+        } else {
+                stride_size = huge_page_size(hstate_vma(vma));
+
+                /*
+                 * As stated in the privileged specification, every PTE in a
+                 * NAPOT region must be invalidated, so reset the stride in that
+                 * case.
+                 */
+                if (has_svnapot()) {
+                        if (stride_size >= PGDIR_SIZE)
+                                stride_size = PGDIR_SIZE;
+                        else if (stride_size >= P4D_SIZE)
+                                stride_size = P4D_SIZE;
+                        else if (stride_size >= PUD_SIZE)
+                                stride_size = PUD_SIZE;
+                        else if (stride_size >= PMD_SIZE)
+                                stride_size = PMD_SIZE;
+                        else
+                                stride_size = PAGE_SIZE;
+                }
+        }
+
+        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                          start, end - start, stride_size);
 }
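As a worked example of the stride clamp (assuming Sv39 with 4 KiB base pages, so PMD_SIZE is 2 MiB): a 2 MiB hugetlb mapping keeps stride_size = PMD_SIZE and costs one invalidation per mapping, while a 64 KiB Svnapot hugepage falls below PMD_SIZE, so the stride resets to PAGE_SIZE and all sixteen 4 KiB PTEs backing the NAPOT region are invalidated, as the privileged spec demands. The same clamp as a standalone sketch (illustrative constants; the kernel macros are the real source of truth):

    #include <assert.h>

    #define EX_PAGE_SIZE    (4UL << 10)     /* 4 KiB */
    #define EX_PMD_SIZE     (2UL << 20)     /* 2 MiB */
    #define EX_PUD_SIZE     (1UL << 30)     /* 1 GiB */

    /* Mirrors the clamp above for sizes up to PUD granularity. */
    static unsigned long ex_napot_stride(unsigned long hpage_size)
    {
            if (hpage_size >= EX_PUD_SIZE)
                    return EX_PUD_SIZE;
            if (hpage_size >= EX_PMD_SIZE)
                    return EX_PMD_SIZE;
            return EX_PAGE_SIZE;    /* NAPOT sizes: flush every base PTE */
    }

    int main(void)
    {
            assert(ex_napot_stride(2UL << 20) == EX_PMD_SIZE);   /* hugepage */
            assert(ex_napot_stride(64UL << 10) == EX_PAGE_SIZE); /* NAPOT */
            return 0;
    }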

 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-        __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+        __flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+                          start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end)
 {
-        __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+        __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+                          start, end - start, PMD_SIZE);
 }
 #endif

+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+        return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+                               struct mm_struct *mm,
+                               unsigned long uaddr)
+{
+        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+        flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+        __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+                          FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+        cpumask_clear(&batch->cpumask);
+}
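Because a single batch can span many address spaces, there is no one ASID to target: arch_tlbbatch_flush() therefore issues a full-range, ASID-less invalidation to every hart in the accumulated mask and then clears the mask for reuse, while arch_flush_tlb_batched_pending() conservatively flushes the whole mm when generic code must synchronize with a still-pending batch. With size = FLUSH_TLB_MAX_SIZE and asid = FLUSH_TLB_NO_ASID, each targeted hart should end up doing a full local flush, which on riscv is a bare sfence.vma (illustrative; the kernel reaches this via local_flush_tlb_all()):

    /* sfence.vma with neither address nor ASID: flush all local entries. */
    static inline void ex_local_flush_all(void)
    {
            __asm__ __volatile__ ("sfence.vma" : : : "memory");
    }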