IBNOS
allocator.c
1 /*
2  * Copyright (c) 2014, Michael Müller
3  * Copyright (c) 2014, Sebastian Lackner
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice, this
10  * list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
19  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  */
27 
28 #include <memory/physmem.h>
29 #include <memory/paging.h>
30 #include <util/util.h>
31 #include <util/list.h>
32 
39 #define HEAP_ALIGN_SIZE 16
40 #define HEAP_ALIGN_MASK (HEAP_ALIGN_SIZE - 1)
41 #define SMALL_HEAP_MAGIC 0xFEEFABB1
42 #define LARGE_HEAP_MAGIC 0xFEEFABB2
43 
44 struct heapEntry
45 {
46  uint32_t heapMagic;
47  struct linkedList entry;
48  uint32_t length : 31;
49  uint32_t reserved : 1;
50 };
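/*
 * Sizing note (illustrative example, assuming 32 bit pointers so that
 * sizeof(struct heapEntry) == 16): a request for 100 bytes grows to
 * 100 + 16 = 116 bytes once the header is added, and is then rounded up
 * to the next multiple of HEAP_ALIGN_SIZE, i.e. 128 bytes, which the
 * small block heap below can serve from its 128 byte size class.
 */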
51 
52 /*
53  * Small block heap
54  */
55 
56 static struct linkedList smallHeap = LL_INIT(smallHeap);
57 
58 static struct linkedList smallUnusedHeap32 = LL_INIT(smallUnusedHeap32);
59 static struct linkedList smallUnusedHeap64 = LL_INIT(smallUnusedHeap64);
60 static struct linkedList smallUnusedHeap128 = LL_INIT(smallUnusedHeap128);
61 static struct linkedList smallUnusedHeap256 = LL_INIT(smallUnusedHeap256);
62 static struct linkedList smallUnusedHeap512 = LL_INIT(smallUnusedHeap512);
63 static struct linkedList smallUnusedHeap1024 = LL_INIT(smallUnusedHeap1024);
64 
65 static inline struct heapEntry *__smallFindUnusedHeapEntry(uint32_t length)
66 {
67  struct linkedList *list;
68 
69  if (length <= 32 && !ll_empty(&smallUnusedHeap32))
70  list = &smallUnusedHeap32;
71  else if (length <= 64 && !ll_empty(&smallUnusedHeap64))
72  list = &smallUnusedHeap64;
73  else if (length <= 128 && !ll_empty(&smallUnusedHeap128))
74  list = &smallUnusedHeap128;
75  else if (length <= 256 && !ll_empty(&smallUnusedHeap256))
76  list = &smallUnusedHeap256;
77  else if (length <= 512 && !ll_empty(&smallUnusedHeap512))
78  list = &smallUnusedHeap512;
79  else if (length <= 1024 && !ll_empty(&smallUnusedHeap1024))
80  list = &smallUnusedHeap1024;
81  else
82  return NULL;
83 
84  /* return first entry of the corresponding list */
85  return LL_ENTRY(ll_remove(list->next), struct heapEntry, entry);
86 }
87 
88 static inline struct heapEntry *__smallQueueUnusedHeap(struct heapEntry *heap, uint32_t length)
89 {
90  struct linkedList *list;
91 
92  if (((uint32_t)heap & PAGE_MASK) == 0 && length == PAGE_SIZE)
93  {
94  /* unmap and free this page */
95  pagingReleasePhysMem(NULL, heap, 1);
96  return NULL;
97  }
98 
99  if (length >= 1024)
100  list = &smallUnusedHeap1024;
101  else if (length >= 512)
102  list = &smallUnusedHeap512;
103  else if (length >= 256)
104  list = &smallUnusedHeap256;
105  else if (length >= 128)
106  list = &smallUnusedHeap128;
107  else if (length >= 64)
108  list = &smallUnusedHeap64;
109  else if (length >= 32)
110  list = &smallUnusedHeap32;
111  else
112  assert(0);
113 
114  /* otherwise remember this frame */
115  heap->heapMagic = SMALL_HEAP_MAGIC;
116  ll_add_after(list, &heap->entry);
117  heap->length = length;
118  heap->reserved = 0;
119 
120  return heap;
121 }
122 
123 static inline struct heapEntry *__smallGetPreviousHeap(struct heapEntry *heap)
124 {
125  struct heapEntry *prev_heap = (struct heapEntry *)((uint32_t)heap & ~PAGE_MASK);
126 
127  while (prev_heap < heap)
128  {
129  assert(prev_heap->heapMagic == SMALL_HEAP_MAGIC);
130  assert((prev_heap->length & HEAP_ALIGN_MASK) == 0);
131 
132  if ((uint32_t)prev_heap + prev_heap->length >= (uint32_t)heap)
133  return prev_heap;
134 
135  prev_heap = (struct heapEntry *)((uint32_t)prev_heap + prev_heap->length);
136  }
137 
138  /* at this point the prev_heap should match heap, otherwise the structure is corrupted */
139  assert(prev_heap == heap);
140  return prev_heap;
141 }
142 
143 static inline struct heapEntry *__smallGetNextHeap(struct heapEntry *heap, uint32_t length)
144 {
145  struct heapEntry *next_heap = (struct heapEntry *)((uint32_t)heap + length);
146  if ((uint32_t)next_heap >= ((uint32_t)heap & ~PAGE_MASK) + PAGE_SIZE) return NULL;
147 
148  assert(next_heap->heapMagic == SMALL_HEAP_MAGIC);
149  assert((next_heap->length & HEAP_ALIGN_MASK) == 0);
150 
151  return next_heap;
152 }
153 
154 /* internally used to free memory */
155 static struct heapEntry *__smallInternalFree(struct heapEntry *deleted_heap, uint32_t length)
156 {
157  struct heapEntry *heap;
158 
159  assert(((uint32_t)deleted_heap & HEAP_ALIGN_MASK) == 0);
160  assert((length & HEAP_ALIGN_MASK) == 0 && length > 0);
161 
162  /* step 1: try to merge with previous free areas */
163  heap = __smallGetPreviousHeap(deleted_heap);
164  if (heap && !heap->reserved)
165  {
166  ll_remove(&heap->entry);
167  length = (uint32_t)deleted_heap + length - (uint32_t)heap;
168  deleted_heap = heap;
169  }
170 
171  /* step 2: merge with consecutive areas */
172  heap = __smallGetNextHeap(deleted_heap, length);
173  if (heap && !heap->reserved)
174  {
175  ll_remove(&heap->entry);
176  length = (uint32_t)heap + heap->length - (uint32_t)deleted_heap;
177  }
178 
179  return __smallQueueUnusedHeap(deleted_heap, length);
180 }
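/*
 * Coalescing example (illustrative): when a reserved 64 byte block is freed
 * and the area directly behind it in the same page is already free and
 * 128 bytes long, the two are merged into a single 192 byte region, which
 * __smallQueueUnusedHeap then files into smallUnusedHeap128, the largest
 * size class not exceeding 192 bytes. If merging ever yields a completely
 * free page, that page is handed back to the paging layer instead.
 */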
181 
182 static struct heapEntry *__smallAlloc(uint32_t length)
183 {
184  struct heapEntry *heap;
185  uint32_t origHeapLength;
186 
187  /* try to find a free area which has the minimum required size */
188  length += sizeof(struct heapEntry);
189  length = (length + HEAP_ALIGN_MASK) & ~HEAP_ALIGN_MASK;
190  heap = __smallFindUnusedHeapEntry(length);
191 
192  /* we found a block */
193  if (heap)
194  {
195  /* ensure that the heap is not corrupted */
197  assert((heap->length & HEAP_ALIGN_MASK) == 0);
198  assert(heap->length >= length);
199  assert(!heap->reserved);
200  origHeapLength = heap->length;
201  }
202  else
203  {
204  heap = pagingAllocatePhysMem(NULL, 1, true, false);
205  origHeapLength = PAGE_SIZE;
206  }
207 
208  /* use full memory if the remaining space is too small to be useful */
209  if (origHeapLength < length + sizeof(struct heapEntry) + 16)
210  length = origHeapLength;
211 
212  heap->heapMagic = SMALL_HEAP_MAGIC;
213  ll_add_after(&smallHeap, &heap->entry);
214  heap->length = length;
215  heap->reserved = 1;
216 
217  /* give the rest back to the owner */
218  if (length < origHeapLength)
219  __smallInternalFree((struct heapEntry *)((uint32_t)heap + length), origHeapLength - length);
220 
221  return heap;
222 }
223 
224 static void __smallFree(struct heapEntry *heap)
225 {
227  assert((heap->length & HEAP_ALIGN_MASK) == 0);
228  assert(heap->reserved);
229 
230  ll_remove(&heap->entry);
231  __smallInternalFree(heap, heap->length);
232 }
233 
234 static struct heapEntry *__smallReAlloc(struct heapEntry *heap, uint32_t length)
235 {
236  struct heapEntry *next_heap;
237  uint32_t origHeapLength;
238 
240  assert((heap->length & HEAP_ALIGN_MASK) == 0);
241  assert(heap->reserved);
242 
243  /* it makes more sense to store this in a large heap entry */
244  if (length > 1024 - sizeof(struct heapEntry))
245  return NULL;
246 
247  length += sizeof(struct heapEntry);
248  length = (length + HEAP_ALIGN_MASK) & ~HEAP_ALIGN_MASK;
249 
250  origHeapLength = heap->length;
251  if (length > origHeapLength)
252  {
 253  /* we need to grow the block, so check the entry directly after it */
254  next_heap = __smallGetNextHeap(heap, heap->length);
255  if (!next_heap || next_heap->reserved) return NULL;
256  if (length > (uint32_t)next_heap + next_heap->length - (uint32_t)heap) return NULL;
257 
258  /* temporarily let our current block span both of them */
259  ll_remove(&next_heap->entry);
260  origHeapLength = (uint32_t)next_heap + next_heap->length - (uint32_t)heap;
261  }
262 
263  /* at this point the total buffer should be sufficient */
264  assert(origHeapLength >= length);
265 
266  /* use full memory if the remaining space is too small to be useful */
267  if (origHeapLength < length + sizeof(struct heapEntry) + 16)
268  length = origHeapLength;
269 
 270  /* the element is still linked into the smallHeap list */
271  heap->heapMagic = SMALL_HEAP_MAGIC;
272  heap->length = length;
273  heap->reserved = 1;
274 
275  /* give the rest back to the owner */
276  if (length < origHeapLength)
277  __smallInternalFree((struct heapEntry *)((uint32_t)heap + length), origHeapLength - length);
278 
279  return heap;
280 }
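/*
 * Realloc example (illustrative): growing a reserved 64 byte block to a
 * 200 byte payload (224 bytes including header and alignment) only works
 * in place when the entry directly after it is free and covers the missing
 * space; otherwise this function returns NULL and heapReAlloc() falls back
 * to allocate, copy and free.
 */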
281 
282 /*
283  * Large block heap
284  */
285 
286 static struct linkedList largeHeap = LL_INIT(largeHeap);
287 
288 static struct heapEntry *__largeAlloc(uint32_t length)
289 {
290  struct heapEntry *heap;
291 
292  /* add overhead for entry and round up to next page boundary */
293  length += sizeof(struct heapEntry);
294  length = (length + PAGE_MASK) & ~PAGE_MASK;
295  heap = pagingAllocatePhysMem(NULL, length >> PAGE_BITS, true, false);
296 
 297  /* put into our list of allocated blocks */
298  heap->heapMagic = LARGE_HEAP_MAGIC;
299  ll_add_after(&largeHeap, &heap->entry);
300  heap->length = length;
301  heap->reserved = 1;
302 
303  return heap;
304 }
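/*
 * Sizing example (illustrative, assuming 4 KiB pages): a request for
 * 5000 bytes becomes 5016 bytes with the header and is rounded up to
 * 8192 bytes, so pagingAllocatePhysMem() is asked for two pages.
 */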
305 
306 static void __largeFree(struct heapEntry *heap)
307 {
308  /* uint32_t length; */
309 
310  assert(((uint32_t)heap & PAGE_MASK) == 0);
312  assert((heap->length & PAGE_MASK) == 0);
313  assert(heap->reserved);
314 
315  ll_remove(&heap->entry);
316 
317  pagingReleasePhysMem(NULL, heap, heap->length >> PAGE_BITS);
318 }
319 
320 static struct heapEntry *__largeReAlloc(struct heapEntry *heap, uint32_t length)
321 {
322  assert(((uint32_t)heap & PAGE_MASK) == 0);
324  assert((heap->length & PAGE_MASK) == 0);
325 
326  /* it makes more sense to store this in a small heap entry */
327  if (length <= 1024 - sizeof(struct heapEntry))
328  return NULL;
329 
330  /* add overhead for entry and round up to next page boundary */
331  length += sizeof(struct heapEntry);
332  length = (length + PAGE_MASK) & ~PAGE_MASK;
333 
334  /* fast-path */
335  if (length == heap->length)
336  return heap;
337 
338  /* temporarily remove the entry while we're relocating */
339  ll_remove(&heap->entry);
340 
341  /* let the paging layer find some better spot for this */
342  heap = pagingReAllocatePhysMem(NULL, heap, heap->length >> PAGE_BITS, length >> PAGE_BITS, true, false);
343 
344  /* update all the values */
346  ll_add_after(&largeHeap, &heap->entry);
347  heap->length = length;
348  heap->reserved = 1;
349 
350  return heap;
351 }
352 
363 void *heapAlloc(uint32_t length)
364 {
365  struct heapEntry *heap;
366  if (!length) return NULL;
367 
368  if (length <= 1024 - sizeof(struct heapEntry))
369  heap = __smallAlloc(length);
370  else
371  heap = __largeAlloc(length);
372 
373  return heap ? (void *)((uint32_t)heap + sizeof(struct heapEntry)) : NULL;
374 }
375 
385 void heapFree(void *addr)
386 {
387  struct heapEntry *heap;
388  if (!addr) return;
389 
390  assert(((uint32_t)addr & PAGE_MASK) >= sizeof(struct heapEntry));
391 
392  heap = (struct heapEntry *)((uint32_t)addr - sizeof(struct heapEntry));
394  assert(heap->reserved);
395 
396  if (heap->heapMagic == SMALL_HEAP_MAGIC)
397  __smallFree(heap);
398  else
399  __largeFree(heap);
400 }
401 
411 uint32_t heapSize(void *addr)
412 {
413  struct heapEntry *heap;
414  if (!addr) return 0;
415 
416  assert(((uint32_t)addr & PAGE_MASK) >= sizeof(struct heapEntry));
417 
418  heap = (struct heapEntry *)((uint32_t)addr - sizeof(struct heapEntry));
420  assert(heap->reserved);
421 
422  return heap->length - sizeof(struct heapEntry);
423 }
424 
439 void *heapReAlloc(void *addr, uint32_t length)
440 {
441  struct heapEntry *heap, *new_heap = NULL;
442  void *new_addr;
443 
444  /* no addr given -> allocate memory, no length -> free memory */
445  if (!addr)
446  return heapAlloc(length);
447 
448  if (!length)
449  {
450  heapFree(addr);
451  return NULL;
452  }
453 
454  assert(((uint32_t)addr & PAGE_MASK) >= sizeof(struct heapEntry));
455 
456  heap = (struct heapEntry *)((uint32_t)addr - sizeof(struct heapEntry));
458  assert(heap->reserved);
459 
460  if (heap->heapMagic == SMALL_HEAP_MAGIC)
461  new_heap = __smallReAlloc(heap, length);
462  else
463  new_heap = __largeReAlloc(heap, length);
464 
465  /* return the new heap if we had success */
466  if (new_heap)
467  return (void *)((uint32_t)new_heap + sizeof(struct heapEntry));
468 
469  /* no faster method, we allocate a new block and copy the data */
470  new_addr = heapAlloc(length);
471  if (!new_addr) return NULL;
472 
473  /* copy data */
474  if (length > (heap->length - sizeof(struct heapEntry)))
475  length = (heap->length - sizeof(struct heapEntry));
476  memcpy(new_addr, addr, length);
477 
478  heapFree(addr);
479  return new_addr;
480 }
481 
 485 void heapVerify()
 486 {
487  struct heapEntry *heap;
488 
489  LL_FOR_EACH(heap, &smallHeap, struct heapEntry, entry)
490  {
491  assert(((uint32_t)heap & HEAP_ALIGN_MASK) == 0);
493  assert(heap->length >= sizeof(struct heapEntry) + 16);
494  assert(heap->reserved);
495  }
496 
497  LL_FOR_EACH(heap, &largeHeap, struct heapEntry, entry)
498  {
499  assert(((uint32_t)heap & PAGE_MASK) == 0);
501  assert(heap->length > 0 && (heap->length & PAGE_MASK) == 0);
502  assert(heap->reserved);
503  }
504 
505  #define VALIDATE_UNUSED_LIST(unused_list) \
506  do \
507  { \
508  LL_FOR_EACH(heap, unused_list, struct heapEntry, entry) \
509  { \
510  assert(((uint32_t)heap & HEAP_ALIGN_MASK) == 0); \
511  assert(heap->heapMagic == SMALL_HEAP_MAGIC); \
512  assert(heap->length >= sizeof(struct heapEntry) + 16); \
513  assert(!heap->reserved); \
514  } \
515  } \
516  while(0)
517 
518  VALIDATE_UNUSED_LIST(&smallUnusedHeap32);
519  VALIDATE_UNUSED_LIST(&smallUnusedHeap64);
520  VALIDATE_UNUSED_LIST(&smallUnusedHeap128);
521  VALIDATE_UNUSED_LIST(&smallUnusedHeap256);
522  VALIDATE_UNUSED_LIST(&smallUnusedHeap512);
523  VALIDATE_UNUSED_LIST(&smallUnusedHeap1024);
524 
525  #undef VALIDATE_UNUSED_LIST
526 }
527 
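A minimal usage sketch of the interface above (illustrative only; it assumes the prototypes of heapAlloc, heapSize, heapReAlloc, heapFree and heapVerify are in scope via the corresponding kernel header, and that the code runs in kernel context on this 32 bit build):

static void heapUsageExample(void)
{
    /* 100 bytes are below the 1024 - sizeof(struct heapEntry) threshold
       (1008 bytes here), so the small block heap serves this request */
    char *buf = heapAlloc(100);
    if (!buf) return;

    /* usable size is the aligned block length minus the 16 byte header, here 112 bytes */
    uint32_t usable = heapSize(buf);
    (void)usable;

    /* growing beyond 1008 bytes cannot stay in the small heap; the data is
       copied into a freshly allocated page granular block */
    buf = heapReAlloc(buf, 4000);
    if (!buf) return;

    heapFree(buf);

    /* walk all heap lists and check their invariants */
    heapVerify();
}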