arch/xtensa/include/asm/cacheflush.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Low-level routines for cache flushing.
 *
 * invalidate data or instruction cache:
 *
 * __invalidate_icache_all()
 * __invalidate_icache_page(adr)
 * __invalidate_dcache_page(adr)
 * __invalidate_icache_range(from,size)
 * __invalidate_dcache_range(from,size)
 *
 * flush data cache:
 *
 * __flush_dcache_page(adr)
 *
 * flush and invalidate data cache:
 *
 * __flush_invalidate_dcache_all()
 * __flush_invalidate_dcache_page(adr)
 * __flush_invalidate_dcache_range(from,size)
 *
 * specials for cache aliasing:
 *
 * __flush_invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_dcache_page_alias(vaddr,paddr)
 * __invalidate_icache_page_alias(vaddr,paddr)
 */
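
/*
 * Illustrative sketch (not part of the upstream header): the *_range()
 * helpers take a start address and a size, not an end address.  Code that
 * fills a buffer with CPU stores and then hands it to a device could, in
 * principle, write the dirty lines back with
 *
 *	__flush_dcache_range((unsigned long)buf, len);
 *
 * and invalidate the range before reading data the device wrote with
 *
 *	__invalidate_dcache_range((unsigned long)buf, len);
 *
 * Real drivers should use the DMA mapping API rather than calling these
 * low-level helpers directly.
 */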

extern void __invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);

#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
static inline void __flush_dcache_page(unsigned long va)
{
}
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
# define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
# define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
#endif

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
#else
static inline void __invalidate_icache_page_alias(unsigned long virt,
						unsigned long phys) { }
#endif

/*
 * We have physically tagged caches - nothing to do here -
 * unless we have cache aliasing.
 *
 * Pages can get remapped. Because this might change the 'color' of that page,
 * we have to flush the cache before the PTE is changed.
 * (see also Documentation/core-api/cachetlb.rst)
 */
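
/*
 * Illustrative note (not part of the upstream header): with a virtually
 * indexed, physically tagged cache whose way size exceeds PAGE_SIZE, the
 * virtual-address bits between PAGE_SHIFT and the way size still select the
 * cache index, so two mappings of the same physical page can land in
 * different cache lines.  A page's "color" in this sense is roughly
 *
 *	color = (vaddr & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT;
 *
 * Remapping a page at a virtual address of a different color would leave
 * stale lines behind under the old color, which is why the cache has to be
 * flushed before the PTE changes.
 */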

#if defined(CONFIG_MMU) && \
	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))

#ifdef CONFIG_SMP
void flush_cache_all(void);
void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
			     unsigned long, unsigned long);
#define flush_cache_all flush_cache_all
#define flush_cache_range flush_cache_range
#define flush_icache_range flush_icache_range
#define flush_cache_page flush_cache_page
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
#define flush_icache_range local_flush_icache_range
#define flush_cache_page  local_flush_cache_page
#endif

#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)

#define flush_cache_mm(mm)		flush_cache_all()
#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

#define flush_cache_vmap(start,end)		flush_cache_all()
#define flush_cache_vmap_early(start,end)	do { } while (0)
#define flush_cache_vunmap(start,end)		flush_cache_all()

void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
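
/*
 * Illustrative note (not part of the upstream header): per
 * Documentation/core-api/cachetlb.rst, flush_dcache_folio() /
 * flush_dcache_page() is called after the kernel writes to a page cache
 * page that may also be mapped into user space, for example:
 *
 *	memcpy(page_address(page), src, len);	// kernel-side write
 *	flush_dcache_page(page);		// make it visible to user mappings
 *
 * On configurations without aliasing (and without SMP) this whole block is
 * compiled out and the no-op defaults from <asm-generic/cacheflush.h> apply.
 */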

void local_flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
void local_flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn);

#else

#define flush_icache_range local_flush_icache_range

#endif

#define flush_icache_user_range flush_icache_range

/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start, (end) - (start));	\
	} while (0)
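
/*
 * Illustrative sketch (not part of the upstream header): any code that
 * writes instructions into memory and then executes them (module loader,
 * kprobes, a JIT) must make the freshly written data visible to instruction
 * fetch, which is what the sequence above does: write the dcache back, then
 * invalidate the icache over the same range.  A hypothetical caller:
 *
 *	memcpy(code_buf, insns, insn_len);
 *	flush_icache_range((unsigned long)code_buf,
 *			   (unsigned long)code_buf + insn_len);
 */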

#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)

extern void copy_to_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
		unsigned long, void*, const void*, unsigned long);
#define copy_to_user_page copy_to_user_page
#define copy_from_user_page copy_from_user_page

#else

#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif
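
/*
 * Illustrative note (not part of the upstream header): copy_to_user_page()
 * is used on paths such as ptrace's access_process_vm(), where the kernel
 * writes into another process's pages through a kernel mapping.  The
 * non-aliasing variant above therefore copies the data and then flushes the
 * dcache and invalidates the icache for the destination, in case the write
 * modified code the target process will execute.  The aliasing variant,
 * provided out of line, additionally has to account for the cache color of
 * the user mapping.
 */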

#include <asm-generic/cacheflush.h>

#endif /* _XTENSA_CACHEFLUSH_H */