author    Anup Patel <anup.patel@wdc.com>           2020-03-04 14:21:42 +0530
committer Anup Patel <anup@brainfault.org>          2020-03-11 15:29:57 +0530
commit    d96316481dbc9a52e7e97c4cef70957507c2845f (patch)
tree      d786e665940f2cebd911be703fec3329fc0b6db2 /lib/sbi/sbi_tlb.c
parent    a4a6a81b7d69b39f4d806e96e6b54a9e0e36d3f3 (diff)
lib: sbi_tlb: Use sbi_hartmask in sbi_tlb_info
Instead of using a single ulong as the source mask in sbi_tlb_info, we use
sbi_hartmask. This way sbi_tlb_info can easily scale to a large number of
HARTs.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
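To make the scaling point concrete, below is a minimal, self-contained sketch of
the idea behind sbi_hartmask: the source mask becomes an array of unsigned longs
rather than a single word, so it can describe any number of HARTs, and merging
two masks is a word-by-word OR. MAX_HARTS, struct mask, mask_set() and mask_or()
are invented names for this sketch only; the real type and helpers live in
include/sbi/sbi_hartmask.h and are not reproduced here.

#include <stdio.h>

/*
 * Sketch only: a multi-word hart mask in the spirit of sbi_hartmask.
 * MAX_HARTS, struct mask, mask_set() and mask_or() are made-up names
 * for illustration, not the OpenSBI definitions.
 */
#define MAX_HARTS	128
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define MASK_WORDS	((MAX_HARTS + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct mask {
	unsigned long bits[MASK_WORDS];	/* scales with MAX_HARTS */
};

static void mask_set(struct mask *m, unsigned int hartid)
{
	m->bits[hartid / BITS_PER_LONG] |= 1UL << (hartid % BITS_PER_LONG);
}

/* Analogue of sbi_hartmask_or(): dst = a | b, word by word. */
static void mask_or(struct mask *dst, const struct mask *a,
		    const struct mask *b)
{
	for (unsigned int w = 0; w < MASK_WORDS; w++)
		dst->bits[w] = a->bits[w] | b->bits[w];
}

int main(void)
{
	struct mask a = { { 0 } }, b = { { 0 } }, merged = { { 0 } };

	mask_set(&a, 3);
	mask_set(&b, 100);	/* would not fit in a single 64-bit ulong */
	mask_or(&merged, &a, &b);

	printf("word0=%lx word1=%lx\n", merged.bits[0], merged.bits[1]);
	return 0;
}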
Diffstat (limited to 'lib/sbi/sbi_tlb.c')
-rw-r--r--  lib/sbi/sbi_tlb.c  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/lib/sbi/sbi_tlb.c b/lib/sbi/sbi_tlb.c
index 21e2436..3cae7ee 100644
--- a/lib/sbi/sbi_tlb.c
+++ b/lib/sbi/sbi_tlb.c
@@ -190,17 +190,14 @@ static void sbi_tlb_local_flush(struct sbi_tlb_info *tinfo)
 static void sbi_tlb_entry_process(struct sbi_scratch *scratch,
 				  struct sbi_tlb_info *tinfo)
 {
-	u32 i;
-	u64 m;
+	u32 rhartid;
 	struct sbi_scratch *rscratch = NULL;
 	unsigned long *rtlb_sync = NULL;
 
 	sbi_tlb_local_flush(tinfo);
 
-	for (i = 0, m = tinfo->shart_mask; m; i++, m >>= 1) {
-		if (!(m & 1UL))
-			continue;
-		rscratch = sbi_hart_id_to_scratch(scratch, i);
+	sbi_hartmask_for_each_hart(rhartid, &tinfo->smask) {
+		rscratch = sbi_hart_id_to_scratch(scratch, rhartid);
 		rtlb_sync = sbi_scratch_offset_ptr(rscratch, tlb_sync_off);
 		while (atomic_raw_xchg_ulong(rtlb_sync, 1)) ;
 	}
@@ -263,11 +260,11 @@ static inline int __sbi_tlb_range_check(struct sbi_tlb_info *curr,
 	if (next->start <= curr->start && next_end > curr_end) {
 		curr->start = next->start;
 		curr->size = next->size;
-		curr->shart_mask = curr->shart_mask | next->shart_mask;
-		ret = SBI_FIFO_UPDATED;
+		sbi_hartmask_or(&curr->smask, &curr->smask, &next->smask);
+		ret = SBI_FIFO_UPDATED;
 	} else if (next->start >= curr->start && next_end <= curr_end) {
-		curr->shart_mask = curr->shart_mask | next->shart_mask;
-		ret = SBI_FIFO_SKIP;
+		sbi_hartmask_or(&curr->smask, &curr->smask, &next->smask);
+		ret = SBI_FIFO_SKIP;
 	}
 
 	return ret;
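In the first hunk, the loop over source HARTs no longer shifts a single ulong bit
by bit; it walks whichever HART IDs are set in the sbi_hartmask and rings each
one's tlb_sync word. Below is a rough, self-contained model of that notification
loop, using C11 atomic_exchange in place of atomic_raw_xchg_ulong and a flat
array in place of the per-HART scratch area; NUM_HARTS, tlb_sync[],
hart_is_set() and notify_source_harts() are invented names for this sketch.

#include <stdatomic.h>

/*
 * Sketch only: the per-hart notification loop from sbi_tlb_entry_process,
 * with C11 atomic_exchange standing in for atomic_raw_xchg_ulong and a
 * flat array standing in for the per-hart scratch space.
 */
#define NUM_HARTS	128
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static atomic_ulong tlb_sync[NUM_HARTS];	/* one sync word per hart */

static int hart_is_set(const unsigned long *bits, unsigned int hartid)
{
	return (bits[hartid / BITS_PER_LONG] >> (hartid % BITS_PER_LONG)) & 1UL;
}

static void notify_source_harts(const unsigned long *smask_bits)
{
	/* Visit only the harts present in the source mask, as the
	 * sbi_hartmask_for_each_hart() iterator does in the patch. */
	for (unsigned int rhartid = 0; rhartid < NUM_HARTS; rhartid++) {
		if (!hart_is_set(smask_bits, rhartid))
			continue;
		/* Swap 1 into the hart's sync word; retry while the old
		 * value was still 1, i.e. the previous notification has
		 * not been consumed yet. */
		while (atomic_exchange(&tlb_sync[rhartid], 1))
			;
	}
}

int main(void)
{
	unsigned long smask[NUM_HARTS / BITS_PER_LONG] = { 0 };

	smask[0] |= 1UL << 2;	/* pretend hart 2 requested the flush */
	notify_source_harts(smask);
	return 0;
}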
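The second hunk only changes how the source masks of two overlapping flush
requests are combined: instead of OR-ing two ulongs, it calls sbi_hartmask_or().
The merge decision itself is unchanged, and roughly follows the shape below.
This is a sketch of just the two branches visible in the hunk; struct range,
range_check() and the enum values are invented names, and the mask is kept as a
plain ulong only to keep the example short.

#include <stdio.h>

/*
 * Sketch only: the two merge cases visible in the __sbi_tlb_range_check
 * hunk. In the patch the mask is an sbi_hartmask combined with
 * sbi_hartmask_or(); a plain ulong is used here for brevity.
 */
enum { FIFO_UNCHANGED, FIFO_UPDATED, FIFO_SKIP };

struct range {
	unsigned long start;
	unsigned long size;
	unsigned long smask;
};

static int range_check(struct range *curr, const struct range *next)
{
	unsigned long curr_end = curr->start + curr->size;
	unsigned long next_end = next->start + next->size;

	if (next->start <= curr->start && next_end > curr_end) {
		/* next covers curr: widen curr, absorb next's source harts */
		curr->start = next->start;
		curr->size = next->size;
		curr->smask |= next->smask;
		return FIFO_UPDATED;
	} else if (next->start >= curr->start && next_end <= curr_end) {
		/* next lies inside curr: only absorb its source harts */
		curr->smask |= next->smask;
		return FIFO_SKIP;
	}
	return FIFO_UNCHANGED;
}

int main(void)
{
	struct range curr = { 0x1000, 0x2000, 0x1 };	/* from hart 0 */
	struct range next = { 0x1800, 0x0200, 0x4 };	/* from hart 2 */

	printf("ret=%d merged smask=0x%lx\n",
	       range_check(&curr, &next), curr.smask);	/* SKIP, 0x5 */
	return 0;
}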