/* vim: set ts=4 sw=4 sts=4 et : */
static void __early_map_check(u64 * entry, u64 * target, u64 * alloc_base)
{
    if (!(*entry & PF_P)) {
        *entry = (*alloc_base) | (PF_RW | PF_P);

        /* reset_cr3 so the newly mapped page is accessible, zero it, then
         * reset cr3 again to make sure no stale mappings are left in there. */

        reset_cr3();
        memset(target, 0, PAGE_SIZE);
        reset_cr3();

        *alloc_base = (*alloc_base) + PAGE_SIZE;
    }
}
void *map_page_early(u64 virtual, u64 physical, u64 * alloc_base)
{
    u64 *pml4e = (u64 *) PML4E_ADDR(virtual);
    u64 *pdpe = (u64 *) PDPE_ADDR(virtual);
    u64 *pde = (u64 *) PDE_ADDR(virtual);
    u64 *pte = (u64 *) PTE_ADDR(virtual);

    /* Make sure all of these structures exist, and if not, alloc and zero them */

    __early_map_check(pml4e, pdpe, alloc_base);
    __early_map_check(pdpe, pde, alloc_base);
    __early_map_check(pde, pte, alloc_base);

    *pte = PAGE_ALIGN(physical) | PF_RW | PF_P;

    reset_cr3();

    return (void *)virtual;
}
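
/* Usage sketch for the early path: boot code keeps a physical bump pointer
 * on free, page-aligned memory and lets map_page_early carve any missing
 * table pages out of it. The addresses below are hypothetical placeholders,
 * not values taken from this kernel.
 */
static void early_map_example(u64 * alloc_base)
{
    /* Map one page; each table level __early_map_check has to create
     * advances *alloc_base by one page. */
    map_page_early(0xffffffff90000000, 0x1000000, alloc_base);
}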
/* Similar to __early_map_check, except it can use page_alloc_phys, and check
 * that the page it gets back is actually usable before installing it.
 */
static int __map_check(u64 * entry, u64 * target)
{
    u64 phys;

    if (!(*entry & PF_P)) {
        /* Loop so we can leak pages that __clear_page determines are unresponsive */
        do {
            /* This seems like it should be the physical page allocator's problem,
             * but in order for it to check we'd have to make it depend on
             * early_map with a known address. Considering this is literally the
             * only consumer of page_alloc_phys outside of the page allocator and
             * should remain so, instead perform the check here where we already
             * have a known virtual target address.
             */

            phys = page_alloc_phys(0);

            /* Nothing left to hand out, report failure to the caller */
            if (!phys)
                return -1;

            *entry = (phys | PF_RW | PF_P);

            /* Flush so target reflects the (possibly replaced) entry */
            reset_cr3();
        } while(__clear_page(target));
    }

    return 0;
}
int __map_page_flags(u64 virtual, u64 physical, u8 flags)
{
    u64 *pml4e, *pdpe, *pde, *pte = NULL;
    int ret;

    pml4e = (u64 *) PML4E_ADDR(virtual);
    pdpe = (u64 *) PDPE_ADDR(virtual);
    pde = (u64 *) PDE_ADDR(virtual);
    pte = (u64 *) PTE_ADDR(virtual);

    /* Make sure all of these structures exist, and if not, alloc and zero them */

    ret = __map_check(pml4e, pdpe);
    if (ret)
        return ret;

    ret = __map_check(pdpe, pde);
    if (ret)
        goto cleanup_pdp;

    ret = __map_check(pde, pte);
    if (ret)
        goto cleanup_pd;

    *pte = PAGE_ALIGN(physical) | PF_P | flags;

    return 0;

    /* On failure, toss any table pages that were created for this mapping
     * but are still unused. */

cleanup_pd:
    if (__table_empty((u64 *) PAGE_ALIGN((u64) pde))) {
        page_alloc_free_phys(PAGE_ALIGN(*pdpe));
        *pdpe = 0;
    }

cleanup_pdp:
    if (__table_empty((u64 *) PAGE_ALIGN((u64) pdpe))) {
        page_alloc_free_phys(PAGE_ALIGN(*pml4e));
        *pml4e = 0;
    }

    return ret;
}
int __table_empty(u64 * table)
{
    int i;

    for (i = 0; i < 512; i++) {
        if (table[i] & PF_P)
            return 0;
    }

    return 1;
}
void __unmap_page(u64 virtual)
{
    u64 *pml4e, *pdpe, *pde, *pte = NULL;
    u64 *pdp, *pd, *pt = NULL;

    pml4e = (u64 *) PML4E_ADDR(virtual);
    pdpe = (u64 *) PDPE_ADDR(virtual);
    pde = (u64 *) PDE_ADDR(virtual);
    pte = (u64 *) PTE_ADDR(virtual);

    pdp = (u64 *) PAGE_ALIGN((u64) pdpe);
    pd = (u64 *) PAGE_ALIGN((u64) pde);
    pt = (u64 *) PAGE_ALIGN((u64) pte);

    /* If the mapping doesn't exist, get out */
    if (!(*pml4e & PF_P))
        return;
    if (!(*pdpe & PF_P))
        return;
    if (!(*pde & PF_P))
        return;

    /* Clear the PTE, then flush so the stale translation is gone */
    *pte = 0;
    reset_cr3();

    /* Free any paging structures this unmap left completely empty */
    if (__table_empty(pt)) {
        page_alloc_free_phys(PAGE_ALIGN(*pde));
        *pde = 0;

        if (__table_empty(pd)) {
            page_alloc_free_phys(PAGE_ALIGN(*pdpe));
            *pdpe = 0;

            if (__table_empty(pdp)) {
                page_alloc_free_phys(PAGE_ALIGN(*pml4e));
                *pml4e = 0;
            }
        }
    }
}
void unmap_pages(u64 virtual, u64 num_pages)
{
    while (num_pages--) {
        __unmap_page(virtual);
        virtual += PAGE_SIZE;
    }
}
int __map_pages_flags(u64 virtual, u64 physical, u64 num_pages, u8 flags)
{
    u64 i, j;
    int ret;

    for (i = 0; i < num_pages; i++) {
        ret = __map_page_flags(virtual, physical, flags);
        if (ret) {
            /* Unwind the pages we already mapped before reporting failure */
            for (j = i; j > 0; j--) {
                virtual -= PAGE_SIZE;
                physical -= PAGE_SIZE;
                unmap_pages(virtual, 1);
            }
            return ret;
        }

        virtual += PAGE_SIZE;
        physical += PAGE_SIZE;
    }

    return 0;
}
int map_pages(u64 virtual, u64 physical, u64 num_pages)
{
    int ret = __map_pages_flags(virtual, physical, num_pages, PF_RW);

    return ret;
}
int map_pages_nocache(u64 virtual, u64 physical, u64 num_pages)
{
    int ret =
        __map_pages_flags(virtual, physical, num_pages,
                          PF_RW | PF_DISABLE_CACHE);

    return ret;
}
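
/* Sketch: the uncached variant is what device MMIO windows want. The virtual
 * and physical addresses below are hypothetical placeholders, not a real BAR
 * from this kernel.
 */
static void *map_mmio_example(void)
{
    u64 virt = 0xffffffffa0000000;  /* hypothetical free kernel address */
    u64 phys = 0xfebc0000;          /* hypothetical device register window */

    if (map_pages_nocache(virt, phys, 4))
        return NULL;

    return (void *) virt;
}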
u64 map_virt_to_phys(u64 virtual)
{
    u64 *pml4e = (u64 *) PML4E_ADDR(virtual);
    u64 *pdpe = (u64 *) PDPE_ADDR(virtual);
    u64 *pde = (u64 *) PDE_ADDR(virtual);
    u64 *pte = (u64 *) PTE_ADDR(virtual);

    if (!(*pml4e & PF_P))
        return 0;
    if (!(*pdpe & PF_P))
        return 0;
    if (!(*pde & PF_P))
        return 0;
    if (!(*pte & PF_P))
        return 0;

    return PAGE_ALIGN(*pte);
}
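
/* Sketch: map_virt_to_phys hands back the base of the backing frame
 * (PAGE_ALIGN(*pte)), or 0 when nothing is mapped, so a byte-accurate
 * translation has to add the page offset back in. Hypothetical helper,
 * not part of the existing interface.
 */
static u64 virt_to_phys_exact(u64 virtual)
{
    u64 base = map_virt_to_phys(virtual);

    if (!base)
        return 0;

    return base | (virtual & (PAGE_SIZE - 1));
}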
void map_page_summarize(u64 virtual)
{
    u64 phys = map_virt_to_phys(virtual);

    printk("M: 0x%lx -> 0x%lx\n", virtual, phys);
}
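
/* Usage sketch for the allocator-backed path: grab one physical page, map it
 * read-write, print the mapping, then tear it down. The virtual address is a
 * hypothetical free kernel address, not one reserved by this kernel.
 */
static void map_example(void)
{
    u64 virt = 0xffffffff90000000;  /* hypothetical, assumed unused */
    u64 phys = page_alloc_phys(0);  /* one 4k physical page */

    if (map_pages(virt, phys, 1))
        return;                     /* mapping failed, nothing was left mapped */

    map_page_summarize(virt);       /* prints "M: 0xffffffff90000000 -> 0x..." */

    unmap_pages(virt, 1);
    page_alloc_free_phys(phys);
}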