1010 */
1111#include <paging.h>
1212
/* Physical page allocator state, populated by initialize_page_bitmap(). */
uint64_t memory_start ;   /* first byte of the managed physical block */
uint64_t memory_end ;     /* one past the last managed byte */
size_t amount_of_pages ;  /* number of PAGE_SIZE pages in the block */
int8 * page_bitmap ;      /* one bit per page: 1 = used, 0 = free */
1418extern uint8_t user_code_start [];
1519extern uint8_t user_code_end [];
1620
17- void initialize_page_bitmap () {
18- info ("Initializing paging!" , __FILE__ );
19- for (size_t i = 0 ; i < sizeof (page_bitmap ) / sizeof (page_bitmap [0 ]); ++ i ) {
20- page_bitmap [i ] = 0 ;
21+ static void mark_page_used (uint64_t addr ) {
22+ if (addr < MEMORY_START || addr >= MEMORY_END ) return ;
23+ size_t page_index = (addr - MEMORY_START ) / PAGE_SIZE ;
24+ size_t byte = page_index / 8 ;
25+ size_t bit = page_index % 8 ;
26+ page_bitmap [byte ] |= (1 << bit );
27+ }
28+
29+ static void mark_page_free (uint64_t addr ) {
30+ if (addr < MEMORY_START || addr >= MEMORY_END ) return ;
31+ size_t page_index = (addr - MEMORY_START ) / PAGE_SIZE ;
32+ size_t byte = page_index / 8 ;
33+ size_t bit = page_index % 8 ;
34+ page_bitmap [byte ] &= ~(1 << bit );
35+ }
36+
37+ void initialize_page_bitmap (int64 kernel_start , int64 kernel_end ) {
38+ info ("Initializing paging" , __FILE__ );
39+
40+ size_t physical_memory_size = 32 MiB ;
41+ void * memory_block = kmalloc (physical_memory_size );
42+ if (!memory_block ) {
43+ error ("Failed to allocate physical memory block!" , __FILE__ );
44+ return ;
2145 }
22- done ("Successfully initialized page bitmap!" , __FILE__ );
46+
47+ memory_start = (uint64_t )memory_block ;
48+ memory_end = memory_start + physical_memory_size ;
49+ amount_of_pages = physical_memory_size / PAGE_SIZE ;
50+
51+ size_t bitmap_size = amount_of_pages / 8 ;
52+ page_bitmap = (int8 * )kmalloc (bitmap_size );
53+ if (!page_bitmap ) {
54+ error ("Failed to allocate page bitmap!" , __FILE__ );
55+ return ;
56+ }
57+
58+ memset (page_bitmap , 0 , bitmap_size );
59+
60+ // Reserve kernel memory
61+ for (uint64_t addr = (uint64_t )kernel_start ; addr < (uint64_t )kernel_end ; addr += PAGE_SIZE )
62+ mark_page_used (addr );
63+
64+ // Reserve user code memory
65+ for (uint64_t addr = (uint64_t )user_code_start ; addr < (uint64_t )user_code_end ; addr += PAGE_SIZE )
66+ mark_page_used (addr );
67+
68+ mm_print_out ();
69+
70+ done ("Successfully initialized page bitmap" , __FILE__ );
2371}
2472
2573void * allocate_page () {
2674 for (size_t i = 0 ; i < amount_of_pages ; ++ i ) {
2775 size_t byte = i / 8 ;
2876 size_t bit = i % 8 ;
29-
3077 if (!(page_bitmap [byte ] & (1 << bit ))) {
3178 page_bitmap [byte ] |= (1 << bit );
32- return (void * )(memory_start + i * page_size );
79+ return (void * )(memory_start + i * PAGE_SIZE );
3380 }
3481 }
35-
3682 error ("Out of physical pages!" , __FILE__ );
3783 return NULL ;
3884}
3985
4086void free_page (void * addr ) {
41- size_t page_index = ((int64 )addr - memory_start ) / page_size ;
42-
43- size_t byte_offset = page_index / 8 ;
44- size_t bit_offset = page_index % 8 ;
45-
46- page_bitmap [byte_offset ] &= ~(1 << bit_offset );
87+ uint64_t aligned_addr = ((uint64_t )addr / PAGE_SIZE ) * PAGE_SIZE ;
88+ mark_page_free (aligned_addr );
4789}
4890
4991static inline uint64_t get_kernel_pml4 () {
@@ -75,7 +117,7 @@ uint64_t virtual_to_physical(uint64_t virt) {
75117 return phys ;
76118}
77119
78- void map_user_page (uint64_t virt , uint64_t phys , int executable ) {
120+ void map_user_page (uint64_t virt , uint64_t phys , uint64_t flags ) {
79121 // Traverse or create PML4 -> PDPT -> PD -> PT
80122 uint64_t * pml4 = (uint64_t * )get_kernel_pml4 (); // kernel PML4
81123 uint64_t * pdpt , * pd , * pt ;
@@ -112,13 +154,6 @@ void map_user_page(uint64_t virt, uint64_t phys, int executable) {
112154 pt = (uint64_t * )(pd [pd_idx ] & ~0xFFF );
113155 }
114156
115- // Map the physical page
116- uint64_t flags = PAGE_PRESENT | PAGE_USER ;
117- if (!executable ) flags |= PAGE_RW ; // writable if data/stack
118- else flags &= ~PAGE_RW ; // code = read-only
119-
120- if (!executable ) flags |= PAGE_RW ;
121-
122157 pt [pt_idx ] = phys | flags ;
123158
124159 // Flush TLB
@@ -128,13 +163,40 @@ void map_user_page(uint64_t virt, uint64_t phys, int executable) {
128163void map_user_code () {
129164 uint64_t size = (uint64_t )user_code_end - (uint64_t )user_code_start ;
130165
131- for (uint64_t off = 0 ; off < size ; off += page_size ) {
132- uint64_t phys = (uint64_t )allocate_page ();
133- void * virt = (void * )(phys + KERNEL_OFFSET );
166+ debug_printf ("size of user code -> %z\n" , size );
167+ debug_printf ("user code start -> %z\n" , user_code_start );
168+ debug_printf ("user code end -> %z\n" , user_code_end );
169+
170+ for (uint64_t off = 0 ; off < size ; off += PAGE_SIZE ) {
171+ void * kernel_va = allocate_page ();
172+ uint64_t phys = (int64 )virtual_to_physical ((int64 )kernel_va );
173+
174+ if (!phys )
175+ error ("page allocation failed" , __FILE__ );
176+ else
177+ info ("page allocation is fine" , __FILE__ );
178+
179+ uint64_t vaddr = USER_CODE_VADDR + off ;
180+ map_user_page (vaddr , phys , USER_CODE_FLAGS );
181+ info ("map_user_code: mapped executable user page" , __FILE__ );
182+
183+ // Verify mapping
184+ uint64_t resolved = virtual_to_physical (vaddr ) & ~0xFFF ;
185+ if (resolved != (phys & ~0xFFF )) {
186+ error ("map_user_code: VA->PA mismatch" , __FILE__ );
187+ } else {
188+ info ("map_user_code: VA->PA matches" , __FILE__ );
189+ }
134190
135- uint64_t copy = (size - off >= page_size ) ? page_size : (size - off );
136- memcpy (virt , user_code_start + off , copy );
191+ // The code bytes to copy.
192+ uint64_t copy = (size - off >= PAGE_SIZE ) ? PAGE_SIZE : (size - off );
193+ memcpy (kernel_va , user_code_start + off , copy );
137194
138- map_user_page (USER_CODE_VADDR + off , phys , 1 );
195+ // Verify the copy of userland.
196+ if (memcmp (kernel_va , user_code_start + off , copy ) != 0 ) {
197+ error ("map_user_code: memcpy verification failed" , __FILE__ );
198+ } else {
199+ info ("map_user_code: memcpy verification succeeded" , __FILE__ );
200+ }
139201 }
140202}
0 commit comments