Skip to content

Commit 0c4a6e7

Browse files
valdaarhun authored and stffrdhrn committed
openrisc: Introduce new utility functions to flush and invalidate caches

According to the OpenRISC architecture manual, the dcache and icache may not be present. When these caches are present, the invalidate and flush registers may be absent. The current implementation does not perform checks to verify their presence before utilizing cache registers, or invalidating and flushing cache blocks. Introduce new functions to detect the presence of cache components and related special-purpose registers.

There are a few places where a range of addresses have to be flushed or invalidated and the implementation is duplicated. Introduce new utility functions and macros that generalize this implementation and reduce duplication.

Signed-off-by: Sahil Siddiq <sahilcdq0@gmail.com>
Signed-off-by: Stafford Horne <shorne@gmail.com>
1 parent efabefb commit 0c4a6e7

File tree

5 files changed

+79
-25
lines changed

5 files changed

+79
-25
lines changed

arch/openrisc/include/asm/cacheflush.h

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,9 @@
2323
*/
2424
extern void local_dcache_page_flush(struct page *page);
2525
extern void local_icache_page_inv(struct page *page);
26+
extern void local_dcache_range_flush(unsigned long start, unsigned long end);
27+
extern void local_dcache_range_inv(unsigned long start, unsigned long end);
28+
extern void local_icache_range_inv(unsigned long start, unsigned long end);
2629

2730
/*
2831
* Data cache flushing always happen on the local cpu. Instruction cache
@@ -38,6 +41,20 @@ extern void local_icache_page_inv(struct page *page);
3841
extern void smp_icache_page_inv(struct page *page);
3942
#endif /* CONFIG_SMP */
4043

44+
/*
45+
* Even if the actual block size is larger than L1_CACHE_BYTES, paddr
46+
* can be incremented by L1_CACHE_BYTES. When paddr is written to the
47+
* invalidate register, the entire cache line encompassing this address
48+
* is invalidated. Each subsequent reference to the same cache line will
49+
* not affect the invalidation process.
50+
*/
51+
#define local_dcache_block_flush(addr) \
52+
local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
53+
#define local_dcache_block_inv(addr) \
54+
local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
55+
#define local_icache_block_inv(addr) \
56+
local_icache_range_inv(addr, addr + L1_CACHE_BYTES)
57+
4158
/*
4259
* Synchronizes caches. Whenever a cpu writes executable code to memory, this
4360
* should be called to make sure the processor sees the newly written code.

arch/openrisc/include/asm/cpuinfo.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,9 @@
1515
#ifndef __ASM_OPENRISC_CPUINFO_H
1616
#define __ASM_OPENRISC_CPUINFO_H
1717

18+
#include <asm/spr.h>
19+
#include <asm/spr_defs.h>
20+
1821
struct cache_desc {
1922
u32 size;
2023
u32 sets;
@@ -34,4 +37,9 @@ struct cpuinfo_or1k {
3437
extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
3538
extern void setup_cpuinfo(void);
3639

40+
/*
41+
* Check if the cache component exists.
42+
*/
43+
extern bool cpu_cache_is_present(const unsigned int cache_type);
44+
3745
#endif /* __ASM_OPENRISC_CPUINFO_H */

arch/openrisc/kernel/dma.c

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -17,16 +17,14 @@
1717
#include <linux/pagewalk.h>
1818

1919
#include <asm/cpuinfo.h>
20+
#include <asm/cacheflush.h>
2021
#include <asm/spr_defs.h>
2122
#include <asm/tlbflush.h>
2223

2324
static int
2425
page_set_nocache(pte_t *pte, unsigned long addr,
2526
unsigned long next, struct mm_walk *walk)
2627
{
27-
unsigned long cl;
28-
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
29-
3028
pte_val(*pte) |= _PAGE_CI;
3129

3230
/*
@@ -36,8 +34,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
3634
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
3735

3836
/* Flush page out of dcache */
39-
for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
40-
mtspr(SPR_DCBFR, cl);
37+
local_dcache_range_flush(__pa(addr), __pa(next));
4138

4239
return 0;
4340
}
@@ -98,21 +95,14 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
9895
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
9996
enum dma_data_direction dir)
10097
{
101-
unsigned long cl;
102-
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
103-
10498
switch (dir) {
10599
case DMA_TO_DEVICE:
106100
/* Flush the dcache for the requested range */
107-
for (cl = addr; cl < addr + size;
108-
cl += cpuinfo->dcache_block_size)
109-
mtspr(SPR_DCBFR, cl);
101+
local_dcache_range_flush(addr, addr + size);
110102
break;
111103
case DMA_FROM_DEVICE:
112104
/* Invalidate the dcache for the requested range */
113-
for (cl = addr; cl < addr + size;
114-
cl += cpuinfo->dcache_block_size)
115-
mtspr(SPR_DCBIR, cl);
105+
local_dcache_range_inv(addr, addr + size);
116106
break;
117107
default:
118108
/*

arch/openrisc/mm/cache.c

Lines changed: 47 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -14,31 +14,70 @@
1414
#include <asm/spr_defs.h>
1515
#include <asm/cache.h>
1616
#include <asm/cacheflush.h>
17+
#include <asm/cpuinfo.h>
1718
#include <asm/tlbflush.h>
1819

19-
static __always_inline void cache_loop(struct page *page, const unsigned int reg)
20+
/*
21+
* Check if the cache component exists.
22+
*/
23+
bool cpu_cache_is_present(const unsigned int cache_type)
2024
{
21-
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
22-
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
25+
unsigned long upr = mfspr(SPR_UPR);
26+
unsigned long mask = SPR_UPR_UP | cache_type;
27+
28+
return !((upr & mask) ^ mask);
29+
}
30+
31+
static __always_inline void cache_loop(unsigned long paddr, unsigned long end,
32+
const unsigned short reg, const unsigned int cache_type)
33+
{
34+
if (!cpu_cache_is_present(cache_type))
35+
return;
2336

24-
while (line < paddr + PAGE_SIZE) {
25-
mtspr(reg, line);
26-
line += L1_CACHE_BYTES;
37+
while (paddr < end) {
38+
mtspr(reg, paddr);
39+
paddr += L1_CACHE_BYTES;
2740
}
2841
}
2942

43+
static __always_inline void cache_loop_page(struct page *page, const unsigned short reg,
44+
const unsigned int cache_type)
45+
{
46+
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
47+
unsigned long end = paddr + PAGE_SIZE;
48+
49+
paddr &= ~(L1_CACHE_BYTES - 1);
50+
51+
cache_loop(paddr, end, reg, cache_type);
52+
}
53+
3054
void local_dcache_page_flush(struct page *page)
3155
{
32-
cache_loop(page, SPR_DCBFR);
56+
cache_loop_page(page, SPR_DCBFR, SPR_UPR_DCP);
3357
}
3458
EXPORT_SYMBOL(local_dcache_page_flush);
3559

3660
void local_icache_page_inv(struct page *page)
3761
{
38-
cache_loop(page, SPR_ICBIR);
62+
cache_loop_page(page, SPR_ICBIR, SPR_UPR_ICP);
3963
}
4064
EXPORT_SYMBOL(local_icache_page_inv);
4165

66+
void local_dcache_range_flush(unsigned long start, unsigned long end)
67+
{
68+
cache_loop(start, end, SPR_DCBFR, SPR_UPR_DCP);
69+
}
70+
71+
void local_dcache_range_inv(unsigned long start, unsigned long end)
72+
{
73+
cache_loop(start, end, SPR_DCBIR, SPR_UPR_DCP);
74+
}
75+
76+
void local_icache_range_inv(unsigned long start, unsigned long end)
77+
{
78+
cache_loop(start, end, SPR_ICBIR, SPR_UPR_ICP);
79+
}
80+
4281
void update_cache(struct vm_area_struct *vma, unsigned long address,
4382
pte_t *pte)
4483
{
@@ -58,4 +97,3 @@ void update_cache(struct vm_area_struct *vma, unsigned long address,
5897
sync_icache_dcache(folio_page(folio, nr));
5998
}
6099
}
61-

arch/openrisc/mm/init.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#include <asm/fixmap.h>
3636
#include <asm/tlbflush.h>
3737
#include <asm/sections.h>
38+
#include <asm/cacheflush.h>
3839

3940
int mem_init_done;
4041

@@ -176,8 +177,8 @@ void __init paging_init(void)
176177
barrier();
177178

178179
/* Invalidate instruction caches after code modification */
179-
mtspr(SPR_ICBIR, 0x900);
180-
mtspr(SPR_ICBIR, 0xa00);
180+
local_icache_block_inv(0x900);
181+
local_icache_block_inv(0xa00);
181182

182183
/* New TLB miss handlers and kernel page tables are in now place.
183184
* Make sure that page flags get updated for all pages in TLB by

0 commit comments

Comments
 (0)