mm: Correct page_alloc test
author    Jack Miller <jack@codezen.org>
          Fri, 17 Feb 2017 23:11:06 +0000 (17:11 -0600)
committer Jack Miller <jack@codezen.org>
          Fri, 12 Apr 2019 17:04:07 +0000 (12:04 -0500)
Since we reversed the order of the various lists, all of the test's
rigid assumptions about block indices, addresses, and allocation order
broke.
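
For context on the new expected values, here is a minimal standalone
sketch of the assumed behavior (not the allocator's real code): if init
now prepends each MAX_ORDER block to the head of its free list while
walking physical memory from low to high, the list reads highest address
first.  The new assertions imply 8 MiB MAX_ORDER blocks over a 1 GiB
test range (128 entries), so physical address 0 lands at index 127 and
0x3f800000 at index 0.  The struct layout, fixed-width types, and demo
loop below are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

struct page_block {
    uint64_t address;
    struct page_block *next;
};

int main(void)
{
    struct page_block *head = NULL;
    uint64_t block_size = 0x800000;   /* assumed 8 MiB MAX_ORDER block */
    int nblocks = 128;                /* assumed 1 GiB test range */

    /* Walk memory low to high, prepending each block to the list. */
    for (int i = 0; i < nblocks; i++) {
        struct page_block *blk = malloc(sizeof(*blk));
        blk->address = (uint64_t)i * block_size;
        blk->next = head;
        head = blk;
    }

    /* Index 0 is now the highest block; address 0 is the tail. */
    assert(head->address == 0x3f800000);

    struct page_block *blk = head;
    for (int i = 0; i < 127; i++)
        blk = blk->next;
    assert(blk->address == 0 && blk->next == NULL);

    return 0;
}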

mm/page_alloc.c

index 7627099..77721d5 100644
@@ -825,7 +825,7 @@ static inline void test_order_index(struct page_block *block, u32 index,
         block = block->next;
 
     assert(block->address == address);
-    assert(!nextnull != !block->next); /* logical XOR */
+    assert((!nextnull) != (!block->next)); /* logical XOR */
 }
 
 void test_page_alloc(u64 max_pages)
@@ -851,9 +851,9 @@ void test_page_alloc(u64 max_pages)
     test_orders(free_pages, free_orders);
     test_orders(used_pages, used_orders);
 
-    test_order_index(free_pages[MAX_ORDER], 0, 0, 0);
-    test_order_index(free_pages[MAX_ORDER], 1, 0x800000, 0);
-    test_order_index(free_pages[MAX_ORDER], 2, 0x1000000, 0);
+    test_order_index(free_pages[MAX_ORDER], 127, 0, 1);
+    test_order_index(free_pages[MAX_ORDER], 126, 0x800000, 0);
+    test_order_index(free_pages[MAX_ORDER], 125, 0x1000000, 0);
 
     /* Reserve 0 - 4M, should split 1 8M block in 2 4M */
     /* reserve_region won't record it on used_pages */
@@ -866,8 +866,8 @@ void test_page_alloc(u64 max_pages)
     test_orders(free_pages, free_orders);
     test_orders(used_pages, used_orders);
 
-    test_order_index(free_pages[MAX_ORDER], 0, 0x800000, 0);
-    test_order_index(free_pages[MAX_ORDER], 1, 0x1000000, 0);
+    test_order_index(free_pages[MAX_ORDER], 126, 0x800000, 1);
+    test_order_index(free_pages[MAX_ORDER], 125, 0x1000000, 0);
     test_order_index(free_pages[MAX_ORDER - 1], 0, 0x400000, 1);
 
     /* Reserve 12M - 13M, should split the 8M - 16M
@@ -883,18 +883,17 @@ void test_page_alloc(u64 max_pages)
     test_orders(free_pages, free_orders);
     test_orders(used_pages, used_orders);
 
-    test_order_index(free_pages[MAX_ORDER], 0, 0x1000000, 0);
-    test_order_index(free_pages[MAX_ORDER], 1, 0x1800000, 0);
-    test_order_index(free_pages[MAX_ORDER - 1], 0, 0x400000, 0);
-    test_order_index(free_pages[MAX_ORDER - 1], 1, 0x800000, 1);
+    test_order_index(free_pages[MAX_ORDER], 125, 0x1000000, 1);
+    test_order_index(free_pages[MAX_ORDER], 124, 0x1800000, 0);
+    test_order_index(free_pages[MAX_ORDER - 1], 1, 0x400000, 1);
+    test_order_index(free_pages[MAX_ORDER - 1], 0, 0x800000, 0);
 
     /* Now using page_alloc_phys should return the first two already
      * split blocks
      */
 
-    assert(page_alloc_phys(MAX_ORDER - 1) == 0x400000);
-
     assert(page_alloc_phys(MAX_ORDER - 1) == 0x800000);
+    assert(page_alloc_phys(MAX_ORDER - 1) == 0x400000);
 
     free_orders[MAX_ORDER - 1] = 0;
     used_orders[MAX_ORDER - 1] = 2;
@@ -906,7 +905,7 @@ void test_page_alloc(u64 max_pages)
      * lower address
      */
 
-    assert(page_alloc_phys(MAX_ORDER - 1) == 0x1000000);
+    assert(page_alloc_phys(MAX_ORDER - 1) == 0x3f800000);
 
     free_orders[MAX_ORDER] -= 1;
     free_orders[MAX_ORDER - 1] = 1;
@@ -916,12 +915,12 @@ void test_page_alloc(u64 max_pages)
     test_orders(free_pages, free_orders);
     test_orders(used_pages, used_orders);
 
-    test_order_index(free_pages[MAX_ORDER], 0, 0x1800000, 0);
-    test_order_index(free_pages[MAX_ORDER - 1], 0, 0x1400000, 1);
+    test_order_index(free_pages[MAX_ORDER], 124, 0x1000000, 1);
+    test_order_index(free_pages[MAX_ORDER - 1], 0, 0x3fc00000, 1);
 
     /* Now a free should properly reconstitute the 8M block */
 
-    page_alloc_free_phys(0x1000000);
+    page_alloc_free_phys(0x3f800000);
 
     free_orders[MAX_ORDER] += 1;
     free_orders[MAX_ORDER - 1] = 0;
@@ -931,7 +930,7 @@ void test_page_alloc(u64 max_pages)
     test_orders(free_pages, free_orders);
     test_orders(used_pages, used_orders);
 
-    test_order_index(free_pages[MAX_ORDER], 0, 0x1000000, 0);
+    test_order_index(free_pages[MAX_ORDER], 0, 0x3f800000, 0);
 
     /* We're done, now reset our structures */
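
A note on the assertion change in the first hunk: since ! binds more
tightly than != in C, the old "!nextnull != !block->next" already parsed
as the intended logical XOR; the added parentheses only make that intent
explicit.  The helper's body is only partly visible above, so the
stand-in below (hypothetical name and loop, matching the visible
signature and asserts, with standard fixed-width types in place of
u32/u64) is just a sketch of how index, address, and nextnull are
interpreted.

#include <assert.h>
#include <stdint.h>

struct page_block {
    uint64_t address;
    struct page_block *next;
};

/* Stand-in for test_order_index(); the walk is assumed from the visible
 * "block = block->next;" line.  It advances index links from the head
 * of one order's list, then checks the block's address and that the
 * block is the tail exactly when the caller flagged it as such.
 */
static inline void check_order_index(struct page_block *block,
                                     uint32_t index, uint64_t address,
                                     int nextnull)
{
    while (index--)
        block = block->next;

    assert(block->address == address);
    assert((!nextnull) != (!block->next)); /* logical XOR */
}

Read this way, test_order_index(free_pages[MAX_ORDER], 127, 0, 1) walks
to the last of the 128 MAX_ORDER blocks and verifies that it covers
physical address 0 and terminates the list, which is what the updated
expectations assert.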