#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

struct page *kmap_to_page(void *addr);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, prot)	kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
	pagefault_enable();
}

#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
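
/*
 * Illustrative sketch, not part of the original header: the usual
 * calling convention for the mappings above.  kmap() may sleep and is
 * paired with kunmap(); kmap_atomic() disables pagefaults (and, with
 * CONFIG_HIGHMEM, preemption in the arch implementation) and must be
 * paired with kunmap_atomic() in the same context.  The helper name
 * below is hypothetical:
 *
 *	static void copy_out_of_page(struct page *page, void *dst,
 *				     size_t off, size_t len)
 *	{
 *		char *kaddr = kmap_atomic(page);
 *
 *		memcpy(dst, kaddr + off, len);
 *		kunmap_atomic(kaddr);
 *	}
 */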

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_TYPE_NR);
#endif
	return idx;
}

static inline int kmap_atomic_idx(void)
{
	return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	int idx = __this_cpu_dec_return(__kmap_atomic_idx);

	BUG_ON(idx < 0);
#else
	__this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
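
/*
 * Illustrative sketch, not part of the original header: an
 * architecture's HIGHMEM kmap_atomic() typically nests mappings with
 * the per-CPU index stack above, roughly as follows (simplified from
 * the x86 version; the fixmap details are per-arch):
 *
 *	void *kmap_atomic(struct page *page)
 *	{
 *		unsigned long vaddr;
 *		int idx, type;
 *
 *		pagefault_disable();
 *		if (!PageHighMem(page))
 *			return page_address(page);
 *
 *		type = kmap_atomic_idx_push();
 *		idx = type + KM_TYPE_NR * smp_processor_id();
 *		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *		set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *		return (void *)vaddr;
 *	}
 */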

/*
 * Prevent people from trying to call kunmap_atomic() as if it were kunmap();
 * kunmap_atomic() should get the return value of kmap_atomic(), not the page.
 */
#define kunmap_atomic(addr)					\
do {								\
	BUILD_BUG_ON(__same_type((addr), struct page *));	\
	__kunmap_atomic(addr);					\
} while (0)
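
/*
 * Illustrative sketch, not part of the original header: the
 * BUILD_BUG_ON() above rejects the common mistake of passing the page
 * instead of the mapped address, and does so at compile time:
 *
 *	void *kaddr = kmap_atomic(page);
 *
 *	kunmap_atomic(kaddr);	correct: pass the mapped address
 *	kunmap_atomic(page);	fails to build: addr is a struct page *
 */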


/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA, but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}
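
/*
 * Illustrative sketch, not part of the original header: an anonymous
 * fault handler would typically use the movable variant, since
 * anonymous pages may later be migrated or reclaimed (error handling
 * abbreviated):
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */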

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
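
/*
 * Illustrative sketch, not part of the original header: filesystems
 * commonly use zero_user_segment() to clear the part of a page beyond
 * EOF, e.g. on truncate (variable names hypothetical):
 *
 *	unsigned offset = i_size & (PAGE_SIZE - 1);
 *
 *	if (offset)
 *		zero_user_segment(page, offset, PAGE_SIZE);
 */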

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}
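
/*
 * Illustrative sketch, not part of the original header: page migration
 * is a typical copy_highpage() caller; after allocating a destination
 * page it copies the old contents wholesale before transferring the
 * page metadata (heavily simplified):
 *
 *	copy_highpage(newpage, page);
 *	if (PageDirty(page))
 *		SetPageDirty(newpage);
 */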

#endif /* _LINUX_HIGHMEM_H */