IBNOS
paging.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2014, Michael Müller
3  * Copyright (c) 2014, Sebastian Lackner
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  * list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  */
27 
28 #include <memory/paging.h>
29 #include <memory/physmem.h>
30 #include <interrupt/interrupt.h>
31 #include <process/process.h>
32 #include <process/thread.h>
33 #include <console/console.h>
34 #include <util/list.h>
35 #include <util/util.h>
36 
/* A contiguous run of physical page indices that must stay identity-mapped
 * during boot (see pagingInsertBootMap / pagingInit).
 * NOTE(review): the struct tag line was lost in the extracted listing; the
 * name is grounded by the pagingBootMap[] declaration below. */
struct bootMapEntry
{
    uint32_t startIndex;    /* first physical page index of the region */
    uint32_t length;        /* number of pages in the region */
};
42 
/* Fixed-size table of boot-time protected regions; entries are kept sorted
 * by startIndex (see pagingInsertBootMap). */
#define MAX_BOOT_ENTRIES 1024
static struct bootMapEntry pagingBootMap[MAX_BOOT_ENTRIES];
static uint32_t pagingNumBootMaps = 0;

/* The last page directory entry points at the directory itself (recursive
 * mapping), so page tables are reachable at fixed virtual addresses. */
#define KERNEL_DIR_ENTRY (PAGETABLE_COUNT - 1)
#define KERNEL_DIR_ADDR (((KERNEL_DIR_ENTRY << PAGETABLE_BITS) | KERNEL_DIR_ENTRY) << PAGE_BITS)
#define KERNEL_PAGE_ADDR (KERNEL_DIR_ENTRY << (PAGETABLE_BITS + PAGE_BITS))

/* Set once pagingInit() has completed; gates boot-map modifications. */
static bool pagingInitialized = false;

/* SYSTEM_FAILURE message tables (NULL-terminated line arrays). */
static const char *error_virtualAddressInUse[] =
{
 " INTERNAL ERROR ",
 " Requested virtual memory address is already in use",
 NULL
};

static const char *error_virtualAddressSpaceFull[] =
{
 " OUT OF MEMORY ",
 " Unable to fulfill request because the virtual address space is exhausted",
 NULL
};

/* possible flags when present == 0 */
#define PAGING_AVAIL_NOTPRESENT_RESERVED 1 /* frame == 0, denies allocation for everyone with lower privileges */
#define PAGING_AVAIL_NOTPRESENT_ON_ACCESS_CREATE 2 /* frame == 0, creates a new page on access with rw = 1 */
#define PAGING_AVAIL_NOTPRESENT_OUTPAGED 3 /* frame points to some external device where the page is located */

/* possible flags when present == 1 */
#define PAGING_AVAIL_PRESENT_SHARED 1 /* shared, will not be duplicated when forking */
#define PAGING_AVAIL_PRESENT_NO_FORK 2 /* doesn't copy this block while forking */
#define PAGING_AVAIL_PRESENT_ON_WRITE_DUPLICATE 3 /* readwrite=0, copy (and release) the physical frame pointed to by frame */
76 
/* Read CR0 (bit 31 = paging enabled). */
uint32_t __getCR0();
asm(".text\n.align 4\n"
"__getCR0:\n"
" movl %cr0, %eax\n"
" ret\n"
);

/* Write CR0; returns the written value in %eax as a side effect of the
 * calling convention. */
uint32_t __setCR0(uint32_t value);
asm(".text\n.align 4\n"
"__setCR0:\n"
" movl 4(%esp), %eax\n"
" movl %eax, %cr0\n"
" ret\n"
);

/* Read CR3 (physical address of the current page directory). */
uint32_t __getCR3();
asm(".text\n.align 4\n"
"__getCR3:\n"
" movl %cr3, %eax\n"
" ret\n"
);

/* Write CR3 - switches the active page directory and flushes the TLB. */
uint32_t __setCR3(uint32_t value);
asm(".text\n.align 4\n"
"__setCR3:\n"
" movl 4(%esp), %eax\n"
" movl %eax, %cr3\n"
" ret\n"
);
106 
/* Invalidates the TLB entry for a single virtual page via invlpg. */
static inline void __flushTLBSingle(void *addr)
{
 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
111 
112 static inline bool __isReserved(struct pagingEntry *table)
113 {
114  return !table->present && (table->avail == PAGING_AVAIL_NOTPRESENT_RESERVED);
115 }
116 
117 
118 static void *__pagingMapPhysMem(struct process *p, uint32_t index, void *addr, bool rw, bool user);
119 
120 /* Returns a pointer to the pagingEntry element for a specific virtual address.
121  * Can be NULL if there is no page table for the specific address yet and alloc is set to false */
122 static struct pagingEntry *__getPagingEntry(struct process *p, void *addr, bool alloc)
123 {
124  struct pagingEntry *dir, *table;
125  bool pagingEnabled = (__getCR0() & 0x80000000);
126  uint32_t i;
127 
128  if (pagingEnabled)
129  {
130  /* lookup the paging directory corresponding to the process */
131  dir = (p != NULL) ? p->pageDirectory : (struct pagingEntry *)KERNEL_DIR_ADDR;
132  }
133  else
134  {
135  /* paging not enabled, request for kernel paging directory */
136  assert(p == NULL);
137  dir = (struct pagingEntry *)__getCR3();
138  }
139 
140  i = (uint32_t)addr >> (PAGETABLE_BITS + PAGE_BITS);
141  dir += i;
142 
143  if (!dir->value)
144  {
145  if (!alloc) return NULL;
146 
147  /* allocate a new entry */
148  dir->present = 1;
149  dir->rw = 1;
150  dir->user = 1;
151  dir->frame = physMemAllocPage(false);
152  }
153  else alloc = false;
154 
155  if (!dir->present)
156  {
157  switch (dir->avail)
158  {
160  physMemPageIn(dir->frame);
161  break;
162 
165  default:
166  assert(0);
167  }
168 
169  assert(dir->present);
170  }
171 
172  /* special flags on dir entries not allowed yet */
173  assert(!dir->avail);
174 
175  if (pagingEnabled)
176  {
177  if (p != NULL)
178  {
179  /* map it into the kernel if its not present yet */
180  if (!p->pageTables[i])
181  p->pageTables[i] = __pagingMapPhysMem(NULL, physMemAddRefPage(dir->frame), NULL, true, false);
182 
183  /* then calculate the position of the entry in the page table */
184  table = p->pageTables[i] + (((uint32_t)addr >> PAGE_BITS) & PAGETABLE_MASK);
185  }
186  else
187  {
188  /* kernel mode pages are always mapped into the kernel virtual address space */
189  table = ((struct pagingEntry *)KERNEL_PAGE_ADDR + ((uint32_t)addr >> PAGE_BITS));
190  }
191  }
192  else
193  {
194  /* paging not enabled, request for kernel pages */
195  assert(p == NULL);
196  table = ((struct pagingEntry *)(dir->frame << PAGE_BITS) + (((uint32_t)addr >> PAGE_BITS) & PAGETABLE_MASK));
197  }
198 
199  /* clear the whole page if this is freshly allocated memory */
200  if (alloc)
201  memset((void *)((uint32_t)table & ~PAGE_MASK), 0, PAGE_SIZE);
202 
203  return table;
204 }
205 
206 /* helper for pagingInit */
207 static bool __pagingBootMapCheck(uint32_t startIndex, uint32_t stopIndex)
208 {
209  uint32_t i;
210 
211  /* TODO: binary search would be faster ... */
212  for (i = 0; i < pagingNumBootMaps; i++)
213  {
214  if (stopIndex <= pagingBootMap[i].startIndex)
215  break;
216 
217  if (pagingBootMap[i].startIndex + pagingBootMap[i].length <= startIndex)
218  continue;
219 
220  /* overlapping */
221  return true;
222  }
223 
224  return false;
225 }
226 
227 /* Maps some physical memory to a given addr (or to any) */
/* Maps the physical page `index` at virtual address `addr` (or, when addr is
 * NULL, at the first free virtual page) in process p (NULL = kernel).
 * Returns the virtual address of the new mapping, or fails hard via
 * SYSTEM_FAILURE when the address is taken / no free page exists. */
static void *__pagingMapPhysMem(struct process *p, uint32_t index, void *addr, bool rw, bool user)
{
 struct pagingEntry *table;

 if (addr)
 {
 /* we don't allow mapping something in the NULL page for now */
 assert(((uint32_t)addr & ~PAGE_MASK) != 0);

 table = __getPagingEntry(p, addr, true);
 if (table->value)
 {
 SYSTEM_FAILURE(error_virtualAddressInUse, (uint32_t)addr);
 return NULL;
 }
 }
 else
 {
 uint32_t i;

 /* linear scan for a free page, skipping page 0 (NULL page) and the
  * kernel's recursive directory slot. TODO: Make this more efficient */
 for (i = 1; i < KERNEL_DIR_ENTRY * PAGETABLE_COUNT; i++)
 {
 table = __getPagingEntry(p, (void *)(i << PAGE_BITS), true);
 if (!table->value)
 {
 addr = (void *)(i << PAGE_BITS);
 break;
 }
 }

 if (!addr)
 {
 SYSTEM_FAILURE(error_virtualAddressSpaceFull);
 return NULL;
 }

 }

 /* reset all bits before setting the new mapping */
 table->value = 0;

 table->present = 1;
 table->rw = rw;
 table->user = user;
 table->frame = index;

 /* kernel mappings are live in the current address space - flush the TLB */
 if (p == NULL) __flushTLBSingle(addr);
 return addr;
}
278 
298 uint32_t interrupt_0x0E(UNUSED uint32_t interrupt, uint32_t error, struct thread *t)
299 {
300  struct process *p = t ? t->process : NULL;
301  struct pagingEntry *table;
302  void *cr2;
303  bool user, write;
304  UNUSED bool present;
305 
306  /* read cr2 */
307  asm volatile("mov %%cr2, %0" : "=r" (cr2));
308 
309  user = (error & 4) != 0;
310  write = (error & 2) != 0;
311  present = (error & 1) != 0;
312 
313  /*
314  consoleWriteString("page fault in ");
315  consoleWriteHex32((uint32_t)cr2);
316  consoleWriteString(", error = ");
317  consoleWriteInt32(error);
318  consoleWriteString("\n");
319  */
320 
321  /* ensure that error code (user / supervisor) matches where we have discovered the error */
322  assert((p != NULL) == user);
323 
324  table = __getPagingEntry(p, cr2, false);
325  if (!table || !table->value) return INTERRUPT_UNHANDLED;
326 
327  /* user has no access to this kernel page */
328  if (!table->user && user) return INTERRUPT_UNHANDLED;
329 
330  if (!table->present)
331  {
332  switch (table->avail)
333  {
335  return INTERRUPT_UNHANDLED;
336 
338  physMemPageIn(table->frame);
339  break;
340 
342  default:
343  assert(0);
344  }
345 
346  assert(table->present);
347  }
348 
349  if (!table->rw && write)
350  {
352  return INTERRUPT_UNHANDLED;
353 
354  /* duplicate this page */
355  table->rw = 1;
356  table->avail = 0;
357 
358  if (!physMemIsLastRef(table->frame))
359  {
360  uint32_t old_index = table->frame;
361  table->frame = physMemAllocPage(false);
362  void *destination = __pagingMapPhysMem(NULL, physMemAddRefPage(table->frame), NULL, true, false);
363  void *source = __pagingMapPhysMem(NULL, physMemAddRefPage(old_index), NULL, true, false);
364 
365  memcpy(destination, source, PAGE_SIZE);
366 
367  pagingReleasePhysMem(NULL, destination, 1);
368  pagingReleasePhysMem(NULL, source, 1);
369  physMemReleasePage(old_index);
370  }
371  }
372 
373  if (p == NULL) __flushTLBSingle(cr2);
374 
376 }
377 
/* Inserts the page-index range [startIndex, stopIndex) into the sorted boot
 * map, merging it with any overlapping or adjacent entries. Only legal before
 * pagingInit() has run. */
void pagingInsertBootMap(uint32_t startIndex, uint32_t stopIndex)
{
 int mapIndex, i;

 /* adding entries is only allowed when paging is disabled (for now) */
 assert(!pagingInitialized);

 /* Check whether we can expand an existing entry */
 for (mapIndex = 0; mapIndex < (signed)pagingNumBootMaps;)
 {
 /* we have to insert before the entry mapIndex */
 if (stopIndex < pagingBootMap[mapIndex].startIndex)
 break;

 /* not yet at the right position to insert elements */
 if (pagingBootMap[mapIndex].startIndex + pagingBootMap[mapIndex].length < startIndex)
 {
 mapIndex++;
 continue;
 }

 /* new region is fully included in an existing entry, nothing to do! */
 if (startIndex >= pagingBootMap[mapIndex].startIndex && stopIndex <= pagingBootMap[mapIndex].startIndex + pagingBootMap[mapIndex].length)
 return;

 /* overlapping area / possible to combine - grow the requested range to
  * cover the existing entry, then drop the entry; the loop re-checks the
  * (possibly larger) range against the following entries */
 if (pagingBootMap[mapIndex].startIndex < startIndex)
 startIndex = pagingBootMap[mapIndex].startIndex;

 if (pagingBootMap[mapIndex].startIndex + pagingBootMap[mapIndex].length > stopIndex)
 stopIndex = pagingBootMap[mapIndex].startIndex + pagingBootMap[mapIndex].length;

 /* remove entry (mapIndex deliberately not incremented) */
 for (i = mapIndex + 1; i < (signed)pagingNumBootMaps; i++)
 pagingBootMap[i - 1] = pagingBootMap[i];
 pagingNumBootMaps--;
 }

 /* create a new entry */
 assert(pagingNumBootMaps < MAX_BOOT_ENTRIES);

 /* insert entry at mapIndex, shifting the tail up by one */
 for (i = pagingNumBootMaps - 1; i >= mapIndex; i--)
 pagingBootMap[i + 1] = pagingBootMap[i];
 pagingNumBootMaps++;

 pagingBootMap[mapIndex].startIndex = startIndex;
 pagingBootMap[mapIndex].length = stopIndex - startIndex;
}
440 
445 {
446  uint32_t i;
447 
448  consoleWriteString("PROTECTED BOOT ENTRIES:\n\n");
449 
450  for (i = 0; i < pagingNumBootMaps; i++)
451  {
452  consoleWriteHex32(pagingBootMap[i].startIndex << PAGE_BITS);
453  consoleWriteString(" - ");
454  consoleWriteHex32(((pagingBootMap[i].startIndex + pagingBootMap[i].length) << PAGE_BITS) - 1);
455  consoleWriteString("\n");
456  }
457 }
458 
468 {
469  uint32_t pageDirectoryIndex, i;
470  struct pagingEntry *dir;
471  uint32_t index;
472 
473  assert(!pagingInitialized);
474 
475  /* the low and high memory addresses are reserved */
476  assert(!__pagingBootMapCheck(0, 1));
477  assert(!__pagingBootMapCheck(KERNEL_PAGE_ADDR >> PAGE_BITS, PAGE_COUNT - 1));
478 
479  pageDirectoryIndex = physMemAllocPage(false);
480 
481  /* initial setup of the page directory */
482  dir = (struct pagingEntry *)(pageDirectoryIndex << PAGE_BITS);
483  memset(dir, 0, PAGE_SIZE);
484  dir[KERNEL_DIR_ENTRY].present = 1;
485  dir[KERNEL_DIR_ENTRY].rw = 1;
486  dir[KERNEL_DIR_ENTRY].frame = pageDirectoryIndex;
487  __setCR3((uint32_t)dir);
488 
489  /* reserve all the remaining memory regions */
490  for (i = 0; i < pagingNumBootMaps; i++)
491  {
492  for (index = pagingBootMap[i].startIndex; index < pagingBootMap[i].startIndex + pagingBootMap[i].length; index++)
493  __pagingMapPhysMem(NULL, index, (void *)(index << PAGE_BITS), true, false);
494  }
495 
496  /* enable paging */
497  __setCR0(__getCR0() | 0x80000000);
498 
499  /* now mark all the kernel space as unpageable (requires paging to be initialized) */
500  for (i = 0; i < pagingNumBootMaps; i++)
501  {
502  for (index = pagingBootMap[i].startIndex; index < pagingBootMap[i].startIndex + pagingBootMap[i].length; index++)
503  physMemMarkUnpageable(index);
504  }
505 
506  pagingInitialized = true;
507 }
508 
515 {
516  struct pagingEntry *table;
517  uint32_t i;
518 
519  consoleWriteString("PAGE TABLE MAP:\n\n");
520 
521  for (i = 0; i < PAGETABLE_COUNT * PAGETABLE_COUNT; i++)
522  {
523  table = __getPagingEntry(p, (void *)(i << PAGE_BITS), false);
524  if (!table)
525  {
526  i |= PAGETABLE_MASK;
527  }
528  else if (table->value)
529  {
530  /* FIXME: support paging out stuff? */
531  assert(table->present);
532 
534  consoleWriteString(" -> ");
535  consoleWriteHex32(table->frame << PAGE_BITS);
536  consoleWriteString(", ");
537  }
538  }
539 
540 }
541 
550 void pagingReserveArea(struct process *p, void *addr, uint32_t length, bool user)
551 {
552  struct pagingEntry *table;
553  uint8_t *cur;
554 
555  /* TODO: Make this more efficient */
556  for (cur = addr; length; length--, cur += PAGE_SIZE)
557  {
558  table = __getPagingEntry(p, cur, true);
559  assert(!table->value);
560 
561  /* reset */
562  table->value = 0;
563 
564  table->present = 0;
565  table->rw = 0;
566  table->user = user;
568  table->frame = 0;
569 
570  /* we don't clear the TLB since the pointer is still not valid */
571  }
572 }
573 
588 void *pagingSearchArea(struct process *p, uint32_t length)
589 {
590  void *addr = pagingTrySearchArea(p, length);
591 
592  if (!addr)
593  {
594  SYSTEM_FAILURE(error_virtualAddressSpaceFull, length);
595  return NULL;
596  }
597 
598  return addr;
599 }
600 
/* Searches for `length` consecutive free virtual pages in process p
 * (NULL = kernel). Returns the start address or NULL when no suitable
 * gap exists. Page 0 and the kernel directory slot are never returned. */
void *pagingTrySearchArea(struct process *p, uint32_t length)
{
 struct pagingEntry *table;
 uint32_t i, start;
 void *addr = NULL;

 /* catch invalid arguments */
 if (!length)
 return NULL;

 /* TODO: Make this more efficient */
 for (i = 1, start = i; i < KERNEL_DIR_ENTRY * PAGETABLE_COUNT; i++)
 {
 table = __getPagingEntry(p, (void *)(i << PAGE_BITS), false);

 if (table && table->value)
 start = i + 1; /* page in use - candidate area restarts after it */
 else
 {
 /* a missing page table means the whole 4MB slot is free - jump to
  * its last page so the loop increment moves to the next slot */
 if (!table) i |= PAGETABLE_MASK;
 if (i >= start + length - 1)
 {
 addr = (void *)(start << PAGE_BITS);
 break;
 }
 }
 }

 return addr;
}
642 
659 void *pagingAllocatePhysMem(struct process *p, uint32_t length, bool rw, bool user)
660 {
661  void *addr = pagingTryAllocatePhysMem(p, length, rw, user);
662 
663  if (!addr)
664  {
665  SYSTEM_FAILURE(error_virtualAddressSpaceFull, length);
666  return NULL;
667  }
668 
669  return addr;
670 }
671 
/* Allocates `length` pages of unpageable physical memory anywhere in the
 * virtual address space of p (NULL = kernel). The area is reserved first so
 * that the allocators called by physMemMarkUnpageable cannot grab the same
 * virtual range. Fails hard via SYSTEM_FAILURE when no area is free. */
void *pagingAllocatePhysMemUnpageable(struct process *p, uint32_t length, bool rw, bool user)
{
 struct pagingEntry *table;
 void *addr = pagingTrySearchArea(p, length);
 uint8_t *cur;
 uint32_t index;

 if (!addr)
 {
 SYSTEM_FAILURE(error_virtualAddressSpaceFull, length);
 return NULL;
 }

 /* reserve the whole area - this is necessary since we want to mark the
 * memory as unpageable, which will probably again call a memory allocator */
 pagingReserveArea(p, addr, length, user);

 for (cur = addr; length; length--, cur += PAGE_SIZE)
 {
 index = physMemMarkUnpageable(physMemAllocPage(false));
 table = __getPagingEntry(p, cur, true);
 /* the entry must still carry the reservation made above */
 assert(__isReserved(table));

 /* reset */
 table->value = 0;

 table->present = 1;
 table->rw = rw;
 table->user = user;
 table->frame = index;

 /* kernel mappings are live immediately - flush the TLB entry */
 if (p == NULL) __flushTLBSingle(cur);
 }

 return addr;
}
722 
/* Allocates `length` pages of physical memory anywhere in the virtual
 * address space of p (NULL = kernel). Returns NULL when no contiguous
 * virtual area is available. */
void *pagingTryAllocatePhysMem(struct process *p, uint32_t length, bool rw, bool user)
{
 struct pagingEntry *table;
 void *addr = pagingTrySearchArea(p, length);
 uint8_t *cur;
 uint32_t index;

 if (!addr) return NULL;

 for (cur = addr; length; length--, cur += PAGE_SIZE)
 {
 index = physMemAllocPage(false);
 table = __getPagingEntry(p, cur, true);
 /* the area was free a moment ago and nothing else may allocate here */
 assert(!table->value);

 /* reset */
 table->value = 0;

 table->present = 1;
 table->rw = rw;
 table->user = user;
 table->frame = index;

 if (p == NULL) __flushTLBSingle(cur);
 }

 return addr;
}
764 
778 void *pagingAllocatePhysMemFixed(struct process *p, void *addr, uint32_t length, bool rw, bool user)
779 {
780  addr = pagingTryAllocatePhysMemFixed(p, addr, length, rw, user);
781 
782  if (!addr)
783  {
784  SYSTEM_FAILURE(error_virtualAddressInUse, (uint32_t)addr);
785  return NULL;
786  }
787 
788  return addr;
789 }
790 
/* Allocates `length` pages of unpageable physical memory at the fixed
 * virtual address addr in process p (NULL = kernel). The area is reserved
 * first so nested allocations triggered by physMemMarkUnpageable cannot
 * collide with it. Asserts (via pagingReserveArea) when the range is in use. */
void *pagingAllocatePhysMemFixedUnpageable(struct process *p, void *addr, uint32_t length, bool rw, bool user)
{
 struct pagingEntry *table;
 uint8_t *cur;
 uint32_t index;

 /* we don't allow mapping something in the NULL page for now */
 assert(((uint32_t)addr & ~PAGE_MASK) != 0);

 /* reserve the whole area - this is necessary since we want to mark the
 * memory as unpageable, which will probably again call a memory allocator */
 pagingReserveArea(p, addr, length, user);

 for (cur = addr; length; length--, cur += PAGE_SIZE)
 {
 index = physMemMarkUnpageable(physMemAllocPage(false));
 table = __getPagingEntry(p, cur, true);
 /* the entry must still carry the reservation made above */
 assert(__isReserved(table));

 /* reset */
 table->value = 0;

 table->present = 1;
 table->rw = rw;
 table->user = user;
 table->frame = index;

 if (p == NULL) __flushTLBSingle(cur);
 }

 return addr;
}
836 
851 void *pagingTryAllocatePhysMemFixed(struct process *p, void *addr, uint32_t length, bool rw, bool user)
852 {
853  struct pagingEntry *table;
854  uint8_t *cur;
855  uint32_t index;
856 
857  /* we don't allow mapping something in the NULL page for now */
858  if (((uint32_t)addr & ~PAGE_MASK) == 0) return NULL;
859 
860  for (cur = addr; length; length--, cur += PAGE_SIZE)
861  {
862  index = physMemAllocPage(false);
863  table = __getPagingEntry(p, cur, true);
864  if (table->value)
865  {
866  physMemReleasePage(index);
867  pagingReleasePhysMem(NULL, addr, ((uint32_t)cur - (uint32_t)addr) >> PAGE_BITS);
868  return NULL;
869  }
870 
871  /* reset */
872  table->value = 0;
873 
874  table->present = 1;
875  table->rw = rw;
876  table->user = user;
877  table->frame = index;
878 
879  if (p == NULL) __flushTLBSingle(cur);
880  }
881 
882  return addr;
883 }
884 
885 
/* internally used for pagingReAllocatePhysMem: moves `length` mapped pages
 * from src_addr to dst_addr within the same process by copying the page
 * table entries (no physical memory is copied). The regions must not
 * overlap. Returns dst_addr with the page offset of src_addr preserved. */
static void *__pagingMove(struct process *p, void *dst_addr, void *src_addr, uint32_t length)
{
 struct pagingEntry *src, *dst;
 uint8_t *src_cur = src_addr, *dst_cur = dst_addr;

 /* both regions shouldn't be overlapping */
 assert( dst_cur + length <= src_cur || src_cur + length <= dst_cur );

 for (src_cur = src_addr, dst_cur = dst_addr; length; length--, src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE)
 {
 src = __getPagingEntry(p, src_cur, false);
 assert(src && src->value);

 dst = __getPagingEntry(p, dst_cur, true);
 assert(!dst->value);

 /* copy the whole entry to the destination */
 *dst = *src;

 /* reset the source entry - ownership moved to dst */
 src->value = 0;

 if (p == NULL)
 {
 __flushTLBSingle(src_cur);
 __flushTLBSingle(dst_cur);
 }
 }

 return (void *)((uint32_t)dst_addr | ((uint32_t)src_addr & PAGE_MASK));
}
918 
937 void *pagingReAllocatePhysMem(struct process *p, void *addr, uint32_t old_length, uint32_t new_length, bool rw, bool user)
938 {
939  struct pagingEntry *table;
940  uint8_t *cur;
941  void *new_addr;
942  uint32_t index;
943 
944  if (old_length < new_length)
945  {
946  /* allocate an area if addr is a NULL pointer */
947  if (!addr) addr = pagingSearchArea(p, new_length);
948 
949  for (cur = (uint8_t *)addr + (old_length << PAGE_BITS); old_length < new_length; old_length++, cur += PAGE_SIZE)
950  {
951  index = physMemAllocPage(false);
952  table = __getPagingEntry(p, cur, true);
953  if (table->value)
954  {
955  /* we have a collision, before we can proceed we need to move everything to a new virtual memory location */
956  new_addr = pagingSearchArea(p, new_length);
957  addr = __pagingMove(p, new_addr, addr, old_length);
958 
959  /* now update the current frame */
960  cur = (uint8_t *)addr + (old_length << PAGE_BITS);
961 
962  /* and retry the operation */
963  table = __getPagingEntry(p, cur, true);
964  assert(!table->value);
965  }
966 
967  /* reset */
968  table->value = 0;
969 
970  table->present = 1;
971  table->rw = rw;
972  table->user = user;
973  table->frame = index;
974 
975  if (p == NULL) __flushTLBSingle(cur);
976  }
977  }
978  else
979  {
980  for (cur = (uint8_t *)addr + (new_length << PAGE_BITS); new_length < old_length; old_length--, cur += PAGE_SIZE)
981  {
982  table = __getPagingEntry(p, cur, false);
983  assert(table && table->value);
984 
985  if (!table->present)
986  {
987  switch (table->avail)
988  {
990  table->value = 0;
991  if (p == NULL) __flushTLBSingle(cur);
992  continue;
993 
995  physMemPageIn(table->frame);
996  break;
997 
999  default:
1000  assert(0);
1001  }
1002 
1003  assert(table->present);
1004  }
1005 
1006  /* reset */
1007  index = table->frame;
1008  table->value = 0;
1009 
1010  physMemReleasePage(index);
1011 
1012  if (p == NULL) __flushTLBSingle(cur);
1013  }
1014 
1015  /* return NULL if the memory pointer is now invalid */
1016  if (new_length == 0)
1017  addr = NULL;
1018  }
1019 
1020  return addr;
1021 }
1022 
/* Releases `length` pages of virtual memory starting at addr; asserts that
 * every page was actually mapped.
 * BUGFIX: the release call previously lived inside assert() - if asserts are
 * ever compiled out, the memory would silently never be released. Perform
 * the call unconditionally and assert only on the result. */
void pagingReleasePhysMem(struct process *p, void *addr, uint32_t length)
{
    bool released = pagingTryReleasePhysMem(p, addr, length);
    assert(released);
    (void)released; /* avoid unused-variable warning when assert is a no-op */
}
1038 
1051 bool pagingTryReleasePhysMem(struct process *p, void *addr, uint32_t length)
1052 {
1053  struct pagingEntry *table;
1054  uint8_t *cur;
1055  uint32_t index;
1056  bool success = true;
1057 
1058  for (cur = addr; length; length--, cur += PAGE_SIZE)
1059  {
1060  table = __getPagingEntry(p, cur, false);
1061  if (!table || !table->value)
1062  {
1063  success = false;
1064  continue;
1065  }
1066 
1067  if (!table->present)
1068  {
1069  switch (table->avail)
1070  {
1073  table->value = 0;
1074  if (p == NULL) __flushTLBSingle(cur);
1075  continue;
1076 
1078  physMemPageIn(table->frame);
1079  break;
1080 
1081  default:
1082  assert(0);
1083  }
1084 
1085  assert(table->present);
1086  }
1087 
1088  index = table->frame;
1089  table->value = 0;
1090 
1091  physMemReleasePage(index);
1092 
1093  if (p == NULL) __flushTLBSingle(cur);
1094  }
1095 
1096  return success;
1097 }
1098 
1111 uint32_t pagingGetPhysMem(struct process *p, void *addr)
1112 {
1113  struct pagingEntry *table;
1114 
1115  table = __getPagingEntry(p, addr, false);
1116  assert(table && table->value);
1117 
1118  if (!table->present)
1119  {
1120  switch (table->avail)
1121  {
1123  physMemPageIn(table->frame);
1124  break;
1125 
1128  default:
1129  assert(0);
1130  }
1131 
1132  assert(table->present);
1133  }
1134 
1135  return table->frame;
1136 }
1137 
1150 void *pagingMapRemoteMemory(struct process *dst_p, struct process *src_p, void *dst_addr, void *src_addr, uint32_t length, bool rw, bool user)
1151 {
1152  struct pagingEntry *src, *dst;
1153  uint8_t *src_cur, *dst_cur;
1154 
1155  if (!dst_addr)
1156  dst_addr = pagingSearchArea(dst_p, length);
1157 
1158  /* we don't allow mapping something in the NULL page for now */
1159  if (((uint32_t)dst_addr & ~PAGE_MASK) == 0) return NULL;
1160 
1161  /* reserve the whole area - this is necessary since we want to mark the
1162  * memory as unpageable, which will probably again call a memory allocator */
1163  pagingReserveArea(dst_p, dst_addr, length, user);
1164 
1165  for (src_cur = src_addr, dst_cur = dst_addr; length; length--, src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE)
1166  {
1167  src = __getPagingEntry(src_p, src_cur, false);
1168  assert(src && src->value);
1169 
1170  dst = __getPagingEntry(dst_p, dst_cur, true);
1171  assert(__isReserved(dst));
1172 
1173  if (!src->present)
1174  {
1175  switch (src->avail)
1176  {
1178  physMemPageIn(src->frame);
1179  break;
1180 
1183  default:
1184  assert(0);
1185  }
1186 
1187  assert(src->present);
1188  }
1189 
1190  if (rw && !src->rw && src->avail == PAGING_AVAIL_PRESENT_ON_WRITE_DUPLICATE)
1191  {
1192  /* duplicate this page */
1193  src->rw = 1;
1194  src->avail = 0;
1195 
1196  if (!physMemIsLastRef(src->frame))
1197  {
1198  uint32_t old_index = src->frame;
1199  src->frame = physMemAllocPage(false);
1200  void *destination = __pagingMapPhysMem(NULL, physMemAddRefPage(src->frame), NULL, true, false);
1201  void *source = __pagingMapPhysMem(NULL, physMemAddRefPage(old_index), NULL, true, false);
1202 
1203  memcpy(destination, source, PAGE_SIZE);
1204 
1205  pagingReleasePhysMem(NULL, destination, 1);
1206  pagingReleasePhysMem(NULL, source, 1);
1207  physMemReleasePage(old_index);
1208  }
1209  }
1210 
1211  /* copy the whole entry to the destination */
1212  *dst = *src;
1213 
1214  /* adjust permissions */
1215  dst->rw = rw;
1216  dst->user = user;
1217 
1218  /* increase refcount */
1219  physMemAddRefPage(dst->frame);
1220 
1221  if (dst_p == NULL) __flushTLBSingle(dst_cur);
1222  }
1223 
1224  return (void *)((uint32_t)dst_addr | ((uint32_t)src_addr & PAGE_MASK));
1225 }
1226 
1237 {
1238  bool pagingEnabled = (__getCR0() & 0x80000000);
1239  uint32_t i;
1240 
1241  assert(pagingEnabled && p != NULL);
1242  assert(p->pageDirectory == NULL);
1243 
1244  p->pageDirectory = pagingAllocatePhysMem(NULL, 1, true, false);
1245  memset(p->pageDirectory, 0, PAGE_SIZE);
1246 
1247  for (i = 0; i < PAGETABLE_COUNT; i++)
1248  p->pageTables[i] = NULL;
1249 }
1250 
1261 void pagingForkProcessPageTable(struct process *destination, struct process *source)
1262 {
1263  bool pagingEnabled = (__getCR0() & 0x80000000);
1264  struct pagingEntry *src, *dst;
1265  uint32_t i;
1266 
1267  assert(pagingEnabled && destination != NULL && source != NULL);
1268  assert(source->pageDirectory != NULL);
1269  assert(destination->pageDirectory == NULL);
1270 
1271  destination->pageDirectory = pagingAllocatePhysMem(NULL, 1, true, false);
1272  memset(destination->pageDirectory, 0, PAGE_SIZE);
1273 
1274  for (i = 0; i < PAGETABLE_COUNT; i++)
1275  destination->pageTables[i] = NULL;
1276 
1277  /* TODO: Make this more efficient */
1278  for (i = 0; i < PAGETABLE_COUNT * PAGETABLE_COUNT; i++)
1279  {
1280  src = __getPagingEntry(source, (void *)(i << PAGE_BITS), false);
1281  if (!src)
1282  {
1283  i |= PAGETABLE_MASK;
1284  }
1285  else if (src->value)
1286  {
1287  dst = __getPagingEntry(destination, (void *)(i << PAGE_BITS), true);
1288  assert(!dst->value);
1289 
1290  if (!src->present)
1291  {
1292  switch (src->avail)
1293  {
1296  *dst = *src;
1297  break;
1298 
1300  physMemPageIn(src->frame);
1301  break;
1302 
1303  default:
1304  assert(0);
1305  }
1306 
1307  assert(src->present);
1308  }
1309 
1310  switch (src->avail)
1311  {
1312  case 0:
1313  if (src->rw)
1314  {
1315  src->rw = 0;
1317  }
1318  *dst = *src;
1319  physMemAddRefPage(dst->frame);
1320  break;
1321 
1324  *dst = *src;
1325  physMemAddRefPage(dst->frame);
1326  break;
1327 
1329  break;
1330 
1331  default:
1332  assert(0);
1333  }
1334  }
1335  }
1336 }
1337 
1345 {
1346  bool pagingEnabled = (__getCR0() & 0x80000000);
1347  struct pagingEntry *table;
1348  uint32_t index, i;
1349 
1350  assert(pagingEnabled && p != NULL);
1351  assert(p->pageDirectory);
1352 
1353  /* TODO: Make this more efficient */
1354  for (i = 0; i < PAGETABLE_COUNT * PAGETABLE_COUNT; i++)
1355  {
1356  table = __getPagingEntry(p, (void *)(i << PAGE_BITS), false);
1357  if (!table)
1358  {
1359  i |= PAGETABLE_MASK;
1360  }
1361  else if (table->value)
1362  {
1363 
1364  if (!table->present)
1365  {
1366  switch (table->avail)
1367  {
1370  continue;
1371 
1373  physMemPageIn(table->frame);
1374  break;
1375 
1376  default:
1377  assert(0);
1378  }
1379 
1380  assert(table->present);
1381  }
1382 
1383  /* reset */
1384  index = table->frame;
1385  table->value = 0;
1386 
1387  physMemReleasePage(index);
1388  }
1389  }
1390 
1391  for (i = 0; i < PAGETABLE_COUNT; i++)
1392  {
1393  if (p->pageTables[i])
1394  {
1395  pagingReleasePhysMem(NULL, p->pageTables[i], 1);
1396  p->pageTables[i] = NULL;
1397  }
1398 
1399  if (p->pageDirectory[i].value)
1400  {
1401 
1402  if (!p->pageDirectory[i].present)
1403  {
1404  switch (p->pageDirectory[i].avail)
1405  {
1408  break;
1409 
1412  default:
1413  assert(0);
1414  }
1415 
1416  assert(p->pageDirectory[i].present);
1417  }
1418 
1419  index = p->pageDirectory[i].frame;
1420  p->pageDirectory[i].value = 0;
1421 
1422  physMemReleasePage(index);
1423  }
1424  }
1425 
1426  pagingReleasePhysMem(NULL, p->pageDirectory, 1);
1427  p->pageDirectory = NULL;
1428 }
1429 
1436 void pagingFillProcessInfo(struct process *p, struct processInfo *info)
1437 {
1438  bool pagingEnabled = (__getCR0() & 0x80000000);
1439  struct pagingEntry *table;
1440  uint32_t i;
1441 
1442  assert(pagingEnabled);
1443  assert(info);
1444 
1445  /* reset memory fields */
1446  info->pagesPhysical = 0;
1447  info->pagesShared = 0;
1448  info->pagesNoFork = 0;
1449  info->pagesReserved = 0;
1450  info->pagesOutpaged = 0;
1451 
1452  /* no page directory found */
1453  if (p && !p->pageDirectory) return;
1454 
1455  /* TODO: Make this more efficient */
1456  for (i = 0; i < PAGETABLE_COUNT * PAGETABLE_COUNT; i++)
1457  {
1458  table = __getPagingEntry(p, (void *)(i << PAGE_BITS), false);
1459  if (!table)
1460  {
1461  i |= PAGETABLE_MASK;
1462  }
1463  else if (table->value)
1464  {
1465  if (table->present)
1466  {
1467  switch (table->avail)
1468  {
1469  case 0:
1470  info->pagesPhysical++;
1471  break;
1472 
1475  info->pagesShared++;
1476  break;
1477 
1479  info->pagesNoFork++;
1480  break;
1481  }
1482 
1483  }
1484  else
1485  {
1486  switch (table->avail)
1487  {
1490  info->pagesReserved++;
1491  break;
1492 
1494  info->pagesOutpaged++;
1495  break;
1496 
1497  default:
1498  assert(0);
1499  }
1500 
1501  }
1502  }
1503  }
1504 
1505 }
1506 
1520 void *pagingTryMapUserMem(struct process *src_p, void *src_addr, uint32_t length, bool rw)
1521 {
1522  struct pagingEntry *src, *dst;
1523  uint8_t *src_cur, *dst_cur;
1524  void *dst_addr = pagingSearchArea(NULL, length);
1525 
1526  /* reserve the whole area - this is necessary since we want to mark the
1527  * memory as unpageable, which will probably again call a memory allocator */
1528  pagingReserveArea(NULL, dst_addr, length, false);
1529 
1530  for (src_cur = src_addr, dst_cur = dst_addr; length; length--, src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE)
1531  {
1532  src = __getPagingEntry(src_p, src_cur, false);
1533  if (!src || !src->value || !src->user) goto invalid;
1534 
1535  dst = __getPagingEntry(NULL, dst_cur, true);
1536  assert(__isReserved(dst));
1537 
1538  if (!src->present)
1539  {
1540  switch (src->avail)
1541  {
1543  goto invalid;
1544 
1546  physMemPageIn(src->frame);
1547  break;
1548 
1550  default:
1551  assert(0);
1552  }
1553 
1554  assert(src->present);
1555  }
1556 
1557 
1558  if (rw && !src->rw)
1559  {
1561  goto invalid;
1562 
1563  /* duplicate this page */
1564  src->rw = 1;
1565  src->avail = 0;
1566 
1567  if (!physMemIsLastRef(src->frame))
1568  {
1569  uint32_t old_index = src->frame;
1570  src->frame = physMemAllocPage(false);
1571  void *destination = __pagingMapPhysMem(NULL, physMemAddRefPage(src->frame), NULL, true, false);
1572  void *source = __pagingMapPhysMem(NULL, physMemAddRefPage(old_index), NULL, true, false);
1573 
1574  memcpy(destination, source, PAGE_SIZE);
1575 
1576  pagingReleasePhysMem(NULL, destination, 1);
1577  pagingReleasePhysMem(NULL, source, 1);
1578  physMemReleasePage(old_index);
1579  }
1580  }
1581 
1582  /* copy the whole entry to the destination */
1583  *dst = *src;
1584 
1585  /* adjust permissions */
1586  dst->rw = rw;
1587  dst->user = false;
1588 
1589  /* increase refcount */
1590  physMemAddRefPage(dst->frame);
1591 
1592  __flushTLBSingle(dst_cur);
1593  }
1594 
1595  return (void *)((uint32_t)dst_addr | ((uint32_t)src_addr & PAGE_MASK));
1596 
1597 invalid:
1598  /* exploit attempt, pointer is not valid in user space for ring3 */
1599  pagingReleasePhysMem(NULL, dst_addr, (((uint32_t)dst_cur - (uint32_t)dst_addr) >> PAGE_BITS) + length);
1600  return NULL;
1601 }
1602 
1615 bool pagingTryReleaseUserMem(struct process *p, void *addr, uint32_t length)
1616 {
1617  struct pagingEntry *table;
1618  uint8_t *cur;
1619  uint32_t index;
1620  bool success = true;
1621 
1622  for (cur = addr; length; length--, cur += PAGE_SIZE)
1623  {
1624  table = __getPagingEntry(p, cur, false);
1625  if (!table || !table->value || !table->user)
1626  {
1627  success = false;
1628  continue;
1629  }
1630 
1631  if (!table->present)
1632  {
1633  switch (table->avail)
1634  {
1637  table->value = 0;
1638  if (p == NULL) __flushTLBSingle(cur);
1639  continue;
1640 
1642  physMemPageIn(table->frame);
1643  break;
1644 
1645  default:
1646  assert(0);
1647  }
1648 
1649  assert(table->present);
1650  }
1651 
1652  /* reset */
1653  index = table->frame;
1654  table->value = 0;
1655 
1656  physMemReleasePage(index);
1657 
1658  if (p == NULL) __flushTLBSingle(cur);
1659  }
1660 
1661  return success;
1662 }
#define PAGETABLE_COUNT
Definition: paging.h:38
void pagingDumpPageTable(struct process *p)
Dumps information about the page table of a specific process.
Definition: paging.c:514
#define PAGETABLE_MASK
Definition: paging.h:36
void * pagingReAllocatePhysMem(struct process *p, void *addr, uint32_t old_length, uint32_t new_length, bool rw, bool user)
Reallocates a specific range of virtual memory in a process.
Definition: paging.c:937
void pagingReleaseProcessPageTable(struct process *p)
Releases the page directory and page table of a specific process.
Definition: paging.c:1344
uint32_t value
Definition: paging.h:65
uint32_t physMemReleasePage(uint32_t index)
Releases a page of physical memory.
Definition: physmem.c:426
#define MAX_BOOT_ENTRIES
Definition: paging.c:43
void pagingAllocProcessPageTable(struct process *p)
Allocates the page directory and page table for a specific process.
Definition: paging.c:1236
uint32_t present
Definition: paging.h:55
void pagingInit()
Initializes paging.
Definition: paging.c:467
#define PAGE_MASK
Definition: physmem.h:36
uint32_t physMemMarkUnpageable(uint32_t index)
Marks a physical page as unpageable.
Definition: physmem.c:486
#define assert(ex)
Definition: util.h:61
void pagingReserveArea(struct process *p, void *addr, uint32_t length, bool user)
Marks the memory in a specific memory area as reserved.
Definition: paging.c:550
void * pagingAllocatePhysMemFixedUnpageable(struct process *p, void *addr, uint32_t length, bool rw, bool user)
Allocates several pages of unpageable physical memory at a fixed virtual address in a process...
Definition: paging.c:804
#define UNUSED
Definition: util.h:39
void * memset(void *ptr, int value, size_t num)
Fills a memory region with some specific byte value.
Definition: util.c:123
uint32_t rw
Definition: paging.h:46
void * pagingAllocatePhysMemFixed(struct process *p, void *addr, uint32_t length, bool rw, bool user)
Allocates several pages of physical memory at a fixed virtual address in a process.
Definition: paging.c:778
#define INTERRUPT_UNHANDLED
Definition: interrupt.h:46
uint32_t physMemPageIn(UNUSED uint32_t hdd_index)
Pages in some data from the hard drive.
Definition: physmem.c:535
uint32_t startIndex
Definition: paging.c:39
void * pagingAllocatePhysMemUnpageable(struct process *p, uint32_t length, bool rw, bool user)
Allocates several pages of unpageable physical memory in a process.
Definition: paging.c:686
void pagingDumpBootMap()
Dumps a list of all entries in the boot map.
Definition: paging.c:444
uint8_t user
Definition: gdt.h:133
#define PAGE_SIZE
Definition: physmem.h:35
uint32_t __getCR0()
uint32_t __getCR3()
struct pagingEntry * pageDirectory
Definition: process.h:75
bool physMemIsLastRef(uint32_t index)
Checks if a physical page is only referenced exactly one time.
Definition: physmem.c:510
uint32_t pagesNoFork
Definition: process.h:39
void * pagingAllocatePhysMem(struct process *p, uint32_t length, bool rw, bool user)
Allocates several pages of physical memory in a process.
Definition: paging.c:659
bool pagingTryReleaseUserMem(struct process *p, void *addr, uint32_t length)
Releases several pages of user-accessible memory of a process, skipping invalid pages.
Definition: paging.c:1615
uint32_t interrupt_0x0E(UNUSED uint32_t interrupt, uint32_t error, struct thread *t)
Page fault handler.
Definition: paging.c:298
#define PAGE_BITS
Definition: physmem.h:37
uint32_t pagesOutpaged
Definition: process.h:41
uint32_t pagesReserved
Definition: process.h:40
uint32_t avail
Definition: paging.h:62
void * pagingSearchArea(struct process *p, uint32_t length)
Searches for a consecutive area of length free pages in a process.
Definition: paging.c:588
bool pagingTryReleasePhysMem(struct process *p, void *addr, uint32_t length)
Releases several pages of physical memory of a process.
Definition: paging.c:1051
Definition: thread.h:47
#define PAGING_AVAIL_NOTPRESENT_ON_ACCESS_CREATE
Definition: paging.c:69
uint32_t physMemAddRefPage(uint32_t index)
Increment the refcounter of a physical page.
Definition: physmem.c:460
void * pagingTryAllocatePhysMem(struct process *p, uint32_t length, bool rw, bool user)
Tries to allocates several pages of physical memory in a process.
Definition: paging.c:736
uint8_t present
Definition: gdt.h:129
uint32_t __setCR3(uint32_t value)
#define KERNEL_DIR_ENTRY
Definition: paging.c:47
uint32_t rw
Definition: paging.h:56
void * pagingTryAllocatePhysMemFixed(struct process *p, void *addr, uint32_t length, bool rw, bool user)
Tries to allocate several pages of physical memory at a fixed virtual address in a process...
Definition: paging.c:851
void * pagingTrySearchArea(struct process *p, uint32_t length)
Searches for a consecutive area of length free pages in a process.
Definition: paging.c:612
uint32_t pagesShared
Definition: process.h:38
uint32_t length
Definition: paging.c:40
#define PAGING_AVAIL_NOTPRESENT_RESERVED
Definition: paging.c:68
void pagingReleasePhysMem(struct process *p, void *addr, uint32_t length)
Releases several pages of physical memory in a process.
Definition: paging.c:1034
#define KERNEL_DIR_ADDR
Definition: paging.c:48
uint32_t __setCR0(uint32_t value)
struct pagingEntry * pageTables[PAGETABLE_COUNT]
Definition: process.h:76
#define PAGING_AVAIL_PRESENT_NO_FORK
Definition: paging.c:74
void consoleWriteHex32(uint32_t value)
Write a 32 bit integer as hex value on the console.
Definition: console.c:303
#define PAGING_AVAIL_NOTPRESENT_OUTPAGED
Definition: paging.c:70
void * memcpy(void *destination, const void *source, size_t num)
Copies a block of memory from source to destination.
Definition: util.c:141
#define SYSTEM_FAILURE(lines,...)
Definition: util.h:71
uint32_t frame
Definition: paging.h:63
#define PAGE_COUNT
Definition: physmem.h:38
#define KERNEL_PAGE_ADDR
Definition: paging.c:49
uint32_t physMemAllocPage(bool lowmem)
Allocates a page of physical memory.
Definition: physmem.c:383
struct process * process
Definition: thread.h:54
uint32_t pagingGetPhysMem(struct process *p, void *addr)
Returns the physical page index for a virtual address.
Definition: paging.c:1111
void consoleWriteString(const char *str)
Write a C string to the console.
Definition: console.c:240
uint32_t pagesPhysical
Definition: process.h:37
#define INTERRUPT_CONTINUE_EXECUTION
Definition: interrupt.h:48
#define PAGETABLE_BITS
Definition: paging.h:37
uint32_t user
Definition: paging.h:57
void * pagingTryMapUserMem(struct process *src_p, void *src_addr, uint32_t length, bool rw)
Maps some virtual memory of a usermode process into the kernel.
Definition: paging.c:1520
void pagingForkProcessPageTable(struct process *destination, struct process *source)
Duplicate a page table and assigns it to a destination process.
Definition: paging.c:1261
uint32_t value
Definition: paging.h:55
void pagingInsertBootMap(uint32_t startIndex, uint32_t stopIndex)
Appends a specific range of physical pages to the bootmap.
Definition: paging.c:391
void pagingFillProcessInfo(struct process *p, struct processInfo *info)
Fills out all memory related fields in the processInfo structure.
Definition: paging.c:1436
#define PAGING_AVAIL_PRESENT_SHARED
Definition: paging.c:73
void * pagingMapRemoteMemory(struct process *dst_p, struct process *src_p, void *dst_addr, void *src_addr, uint32_t length, bool rw, bool user)
Maps some virtual memory from one process to another one.
Definition: paging.c:1150
#define PAGING_AVAIL_PRESENT_ON_WRITE_DUPLICATE
Definition: paging.c:75