- tlsf malloc project: https://github.com/mattconte/tlsf
- last commit when sync'ed:
  https://github.com/mattconte/tlsf/commit/deff9ab509341f264addbd3c8ada533678591905
...
@@ -1,5 +1,20 @@
#if defined(TLSF_MALLOC)

+/**
+ * Copyright inside tlsf_malloc.h
+ */
+
+/**
+ * Sync'ed on Apr 6, 2021 with https://github.com/mattconte/tlsf
+ * - last commit:
+ * https://github.com/mattconte/tlsf/commit/deff9ab509341f264addbd3c8ada533678591905
+ *
+ * Summary of Kamailio specific changes:
+ * - fields to keep the usage statistics
+ * - alloc info structure for debug (file name, line, ...)
+ * - prototypes with debug info parameters
+ */
+
#include <assert.h>
#include <limits.h>
#include <stddef.h>
...
@@ -7,15 +22,207 @@
#include <stdlib.h>
#include <string.h>

-#include "tlsf_malloc.h"
-#include "tlsf_malloc_bits.h"
-#include "src_loc.h"
-#include "memdbg.h"
-#include "memapi.h"
#include "../dprint.h"
#include "../cfg/cfg.h"
#include "../globals.h"

+#include "src_loc.h"
+#include "memdbg.h"
+#include "memapi.h"
+
+#include "tlsf_malloc.h"
+
+#if defined(__cplusplus)
+#define tlsf_decl inline
+#else
+#define tlsf_decl static
+#endif
+
+/*
+** Architecture-specific bit manipulation routines.
+**
+** TLSF achieves O(1) cost for malloc and free operations by limiting
+** the search for a free block to a free list of guaranteed size
+** adequate to fulfill the request, combined with efficient free list
+** queries using bitmasks and architecture-specific bit-manipulation
+** routines.
+**
+** Most modern processors provide instructions to count leading zeroes
+** in a word, find the lowest and highest set bit, etc. These
+** specific implementations will be used when available, falling back
+** to a reasonably efficient generic implementation.
+**
+** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
+** ffs/fls return 1-32 by default, returning 0 for error.
+*/
+
+/*
+** Detect whether or not we are building for a 32- or 64-bit (LP/LLP)
+** architecture. There is no reliable portable method at compile-time.
+*/
+#if defined (__alpha__) || defined (__ia64__) || defined (__x86_64__) \
+    || defined (_WIN64) || defined (__LP64__) || defined (__LLP64__)
+#define TLSF_64BIT
+#endif
+
+/*
+** gcc 3.4 and above have builtin support, specialized for architecture.
+** Some compilers masquerade as gcc; patchlevel test filters them out.
+*/
+#if defined (__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
+    && defined (__GNUC_PATCHLEVEL__)
+
+#if defined (__SNC__)
+/* SNC for Playstation 3. */
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    const unsigned int reverse = word & (~word + 1);
+    const int bit = 32 - __builtin_clz(reverse);
+    return bit - 1;
+}
+
+#else
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    return __builtin_ffs(word) - 1;
+}
+
+#endif
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    const int bit = word ? 32 - __builtin_clz(word) : 0;
+    return bit - 1;
+}
+
+#elif defined (_MSC_VER) && (_MSC_VER >= 1400) && (defined (_M_IX86) || defined (_M_X64))
+/* Microsoft Visual C++ support on x86/X64 architectures. */
+
+#include <intrin.h>
+
+#pragma intrinsic(_BitScanReverse)
+#pragma intrinsic(_BitScanForward)
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    unsigned long index;
+    return _BitScanReverse(&index, word) ? index : -1;
+}
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    unsigned long index;
+    return _BitScanForward(&index, word) ? index : -1;
+}
+
+#elif defined (_MSC_VER) && defined (_M_PPC)
+/* Microsoft Visual C++ support on PowerPC architectures. */
+
+#include <ppcintrinsics.h>
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    const int bit = 32 - _CountLeadingZeros(word);
+    return bit - 1;
+}
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    const unsigned int reverse = word & (~word + 1);
+    const int bit = 32 - _CountLeadingZeros(reverse);
+    return bit - 1;
+}
+
+#elif defined (__ARMCC_VERSION)
+/* RealView Compilation Tools for ARM */
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    const unsigned int reverse = word & (~word + 1);
+    const int bit = 32 - __clz(reverse);
+    return bit - 1;
+}
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    const int bit = word ? 32 - __clz(word) : 0;
+    return bit - 1;
+}
+
+#elif defined (__ghs__)
+/* Green Hills support for PowerPC */
+
+#include <ppc_ghs.h>
+
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    const unsigned int reverse = word & (~word + 1);
+    const int bit = 32 - __CLZ32(reverse);
+    return bit - 1;
+}
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    const int bit = word ? 32 - __CLZ32(word) : 0;
+    return bit - 1;
+}
+
+#else
+/* Fall back to generic implementation. */
+
+tlsf_decl int tlsf_fls_generic(unsigned int word)
+{
+    int bit = 32;
+
+    if (!word) bit -= 1;
+    if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
+    if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
+    if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
+    if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
+    if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }
+
+    return bit;
+}
+
+/* Implement ffs in terms of fls. */
+tlsf_decl int tlsf_ffs(unsigned int word)
+{
+    return tlsf_fls_generic(word & (~word + 1)) - 1;
+}
+
+tlsf_decl int tlsf_fls(unsigned int word)
+{
+    return tlsf_fls_generic(word) - 1;
+}
+
+#endif
+
+/* Possibly 64-bit version of tlsf_fls. */
+#if defined (TLSF_64BIT)
+tlsf_decl int tlsf_fls_sizet(size_t size)
+{
+    int high = (int)(size >> 32);
+    int bits = 0;
+    if (high)
+    {
+        bits = 32 + tlsf_fls(high);
+    }
+    else
+    {
+        bits = tlsf_fls((int)size & 0xffffffff);
+
+    }
+    return bits;
+}
+#else
+#define tlsf_fls_sizet tlsf_fls
+#endif
+
+#undef tlsf_decl
+
+
/*
** Constants.
*/
...
@@ -23,7 +230,10 @@
/* Public constants: may be modified. */
enum tlsf_public
{
-    /* log2 of number of linear subdivisions of block sizes. */
+    /* log2 of number of linear subdivisions of block sizes. Larger
+    ** values require more memory in the control structure. Values of
+    ** 4 or 5 are typical.
+    */
    SL_INDEX_COUNT_LOG2 = 5,
};

...
@@ -104,14 +314,16 @@ tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
/*
** Data structures and associated constants.
*/
+
#ifdef DBG_TLSF_MALLOC
-typedef struct {
+typedef struct alloc_info {
    const char* file;
    const char* func;
    const char* mname;
    unsigned int line;
} alloc_info_t;
#endif
+
/*
** Block header structure.
**
...
@@ -171,20 +383,30 @@ static const size_t block_size_min =
    sizeof(block_header_t) - sizeof(block_header_t*);
static const size_t block_size_max = tlsf_cast(size_t, 1) << FL_INDEX_MAX;

-#define TLSF_INCREASE_REAL_USED(control, increment) do {control->real_used += (increment) ; control->max_used = tlsf_max(control->real_used, control->max_used);}while(0)
-#define TLSF_INCREASE_FRAGMENTS(control) do {control->fragments++ ; control->max_fragments = tlsf_max(control->fragments, control->max_fragments);}while(0)
+#define TLSF_INCREASE_REAL_USED(control, increment) do { \
+        control->real_used += (increment); \
+        control->max_used = tlsf_max(control->real_used, control->max_used); \
+    } while(0)
+#define TLSF_INCREASE_FRAGMENTS(control) do { \
+        control->fragments++; \
+        control->max_fragments = tlsf_max(control->fragments, \
+                control->max_fragments);\
+    } while(0)

/* The TLSF control structure. */
typedef struct control_t
{
    /* Empty lists point at this block to indicate they are free. */
    block_header_t block_null;
+
+    /* Kamailio statistics */
    size_t total_size;
    size_t allocated;
    size_t real_used;
    size_t max_used;
    size_t fragments;
    size_t max_fragments;
+
    /* Bitmaps for free lists. */
    unsigned int fl_bitmap;
    unsigned int sl_bitmap[FL_INDEX_COUNT];
...
@@ -213,7 +435,7 @@ static void block_set_size(block_header_t* block, size_t size)

static int block_is_last(const block_header_t* block)
{
-    return 0 == block_size(block);
+    return block_size(block) == 0;
}

static int block_is_free(const block_header_t* block)
...
@@ -267,6 +489,7 @@ static block_header_t* offset_to_block(const void* ptr, size_t size)
/* Return location of previous block. */
static block_header_t* block_prev(const block_header_t* block)
{
+    tlsf_assert(block_is_prev_free(block) && "previous block must be free");
    return block->prev_phys_block;
}

...
@@ -274,7 +497,7 @@ static block_header_t* block_prev(const block_header_t* block)
static block_header_t* block_next(const block_header_t* block)
{
    block_header_t* next = offset_to_block(block_to_ptr(block),
-        block_size(block) - sizeof(block_header_t*));
+        block_size(block) - block_header_overhead);
    tlsf_assert(!block_is_last(block));
    return next;
}
...
@@ -329,10 +552,15 @@ static void* align_ptr(const void* ptr, size_t align)
static size_t adjust_request_size(size_t size, size_t align)
{
    size_t adjust = 0;
-    if (size && size < block_size_max)
+    if (size)
    {
        const size_t aligned = align_up(size, align);
-        adjust = tlsf_max(aligned, block_size_min);
+
+        /* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
+        if (aligned < block_size_max)
+        {
+            adjust = tlsf_max(aligned, block_size_min);
+        }
    }
    return adjust;
}
...
@@ -364,7 +592,7 @@ static void mapping_insert(size_t size, int* fli, int* sli)
/* This version rounds up to the next block size (for allocations) */
static void mapping_search(size_t size, int* fli, int* sli)
{
-    if (size >= (1 << SL_INDEX_COUNT_LOG2))
+    if (size >= SMALL_BLOCK_SIZE)
    {
        const size_t round = (1 << (tlsf_fls_sizet(size) - SL_INDEX_COUNT_LOG2)) - 1;
        size += round;
...
@@ -381,11 +609,11 @@ static block_header_t* search_suitable_block(control_t* control, int* fli, int*
    ** First, search for a block in the list associated with the given
    ** fl/sl index.
    */
-    unsigned int sl_map = control->sl_bitmap[fl] & (~0 << sl);
+    unsigned int sl_map = control->sl_bitmap[fl] & (~0U << sl);
    if (!sl_map)
    {
        /* No block exists. Search in the next largest first-level list. */
-        const unsigned int fl_map = control->fl_bitmap & (~0 << (fl + 1));
+        const unsigned int fl_map = control->fl_bitmap & (~0U << (fl + 1));
        if (!fl_map)
        {
            /* No free blocks available, memory has been exhausted. */
...
@@ -451,8 +679,9 @@ static void insert_free_block(control_t* control, block_header_t* block, int fl,
    ** and second-level bitmaps appropriately.
    */
    control->blocks[fl][sl] = block;
-    control->fl_bitmap |= (1 << fl);
-    control->sl_bitmap[fl] |= (1 << sl);
+    control->fl_bitmap |= (1U << fl);
+    control->sl_bitmap[fl] |= (1U << sl);
+
    TLSF_INCREASE_FRAGMENTS(control);
}

...
@@ -482,7 +711,7 @@ static block_header_t* block_split(block_header_t* block, size_t size)
{
    /* Calculate the amount of space left in the remaining block. */
    block_header_t* remaining =
-        offset_to_block(block_to_ptr(block), size - sizeof(block_header_t*));
+        offset_to_block(block_to_ptr(block), size - block_header_overhead);

    const size_t remain_size = block_size(block) - (size + block_header_overhead);

...
@@ -491,6 +720,7 @@ static block_header_t* block_split(block_header_t* block, size_t size)

    tlsf_assert(block_size(block) == remain_size + size + block_header_overhead);
    block_set_size(remaining, remain_size);
+    tlsf_assert(block_size(remaining) >= block_size_min && "block split with invalid size");

    block_set_size(block, size);
    block_mark_as_free(remaining);
...
@@ -501,7 +731,7 @@ static block_header_t* block_split(block_header_t* block, size_t size)
/* Absorb a free block's storage into an adjacent previous free block. */
static block_header_t* block_absorb(block_header_t* prev, block_header_t* block)
{
-    tlsf_assert(!block_is_last(prev) && "previous block can't be last!");
+    tlsf_assert(!block_is_last(prev) && "previous block can't be last");
    /* Note: Leaves flags untouched. */
    prev->size += block_size(block) + block_header_overhead;
    block_link_next(prev);
...
@@ -531,7 +761,7 @@ static block_header_t* block_merge_next(control_t* control, block_header_t* bloc

    if (block_is_free(next))
    {
-        tlsf_assert(!block_is_last(block) && "previous block can't be last!");
+        tlsf_assert(!block_is_last(block) && "previous block can't be last");
        block_remove(control, next);
        block = block_absorb(block, next);
    }
...
@@ -577,6 +807,22 @@ static void block_trim_used(control_t* control, block_header_t* block, size_t si
    }
}

+static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
+{
+    block_header_t* remaining_block = block;
+    if (block_can_split(block, size))
+    {
+        /* We want the 2nd block. */
+        remaining_block = block_split(block, size - block_header_overhead);
+        block_set_prev_free(remaining_block);
+
+        block_link_next(block);
+        block_insert(control, block);
+    }
+
+    return remaining_block;
+}
+
static block_header_t* block_locate_free(control_t* control, size_t size)
{
    int fl = 0, sl = 0;
...
@@ -585,7 +831,17 @@ static block_header_t* block_locate_free(control_t* control, size_t size)
    if (size)
    {
        mapping_search(size, &fl, &sl);
-        block = search_suitable_block(control, &fl, &sl);
+
+        /*
+        ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
+        ** with indices that are off the end of the block array.
+        ** So, we protect against that here, since this is the only callsite of mapping_search.
+        ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
+        */
+        if (fl < FL_INDEX_COUNT)
+        {
+            block = search_suitable_block(control, &fl, &sl);
+        }
    }

    if (block)
...
@@ -606,11 +862,13 @@ static void* block_prepare_used(control_t* control, block_header_t* block, size_
    void* p = 0;
    if (block)
    {
+        tlsf_assert(size && "size must be non-zero");
        block_trim_free(control, block, size);
        block_mark_as_used(block);
        p = block_to_ptr(block);
        TLSF_INCREASE_REAL_USED(control, block_size(block) + (p - (void *)block
-            /* prev_phys_block is melted in the previous block when the current block is used */
+            /* prev_phys_block is melted in the previous block
+             * when the current block is used */
            + sizeof(block->prev_phys_block)));
        control->allocated += block_size(block);
#ifdef DBG_TLSF_MALLOC
...
@@ -663,6 +921,7 @@ static void integrity_walker(void* ptr, size_t size, int used, void* user)
    const size_t this_block_size = block_size(block);

    int status = 0;
+    (void)used;
    tlsf_insist(integ->prev_status == this_prev_status && "prev status incorrect");
    tlsf_insist(size == this_block_size && "block size incorrect");

...
@@ -682,9 +941,9 @@ int tlsf_check(tlsf_t tlsf)
    {
        for (j = 0; j < SL_INDEX_COUNT; ++j)
        {
-            const int fl_map = control->fl_bitmap & (1 << i);
+            const int fl_map = control->fl_bitmap & (1U << i);
            const int sl_list = control->sl_bitmap[i];
-            const int sl_map = sl_list & (1 << j);
+            const int sl_map = sl_list & (1U << j);
            const block_header_t* block = control->blocks[i][j];

            /* Check that first- and second-level lists agree. */
...
@@ -733,7 +992,8 @@ static void default_walker(void* ptr, size_t size, int used, void* user)
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user)
{
    tlsf_walker pool_walker = walker ? walker : default_walker;
-    block_header_t* block = pool + tlsf_size() - sizeof(block_header_t*);
+    block_header_t* block =
+        offset_to_block(pool, -(int)block_header_overhead);

    while (block && !block_is_last(block))
    {
...
@@ -770,22 +1030,22 @@ int tlsf_check_pool(pool_t pool)
** Size of the TLSF structures in a given memory block passed to
** tlsf_create, equal to the size of a control_t
*/
-size_t tlsf_size()
+size_t tlsf_size(void)
{
    return sizeof(control_t);
}

-size_t tlsf_align_size()
+size_t tlsf_align_size(void)
{
    return ALIGN_SIZE;
}

-size_t tlsf_block_size_min()
+size_t tlsf_block_size_min(void)
{
    return block_size_min;
}

-size_t tlsf_block_size_max()
+size_t tlsf_block_size_max(void)
{
    return block_size_max;
}
...
@@ -795,12 +1055,12 @@ size_t tlsf_block_size_max()
** tlsf_add_pool, equal to the overhead of a free block and the
** sentinel block.
*/
-size_t tlsf_pool_overhead()
+size_t tlsf_pool_overhead(void)
{
    return 2 * block_header_overhead;
}

-size_t tlsf_alloc_overhead()
+size_t tlsf_alloc_overhead(void)
{
    return block_header_overhead;
}
...
@@ -839,17 +1099,19 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
    ** so that the prev_phys_block field falls outside of the pool -
    ** it will never be used.
    */
-    block = mem - sizeof (size_t);/*offset_to_block(mem, -(tlsfptr_t)block_header_overhead);*/
+    block = offset_to_block(mem, -(tlsfptr_t)block_header_overhead);
    block_set_size(block, pool_bytes);
    block_set_free(block);
    block_set_prev_used(block);
    block_insert(tlsf_cast(control_t*, tlsf), block);
+
    tlsf_cast(control_t*, tlsf)->total_size += block_size(block);
#ifdef DBG_TLSF_MALLOC
    block->alloc_info.file = _SRC_LOC_;
    block->alloc_info.func = _SRC_FUNCTION_;
    block->alloc_info.line = _SRC_LINE_;
#endif
+
    /* Split the block to create a zero-size sentinel block. */
    next = block_link_next(block);
    block_set_size(next, 0);
...
@@ -872,6 +1134,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)

    mapping_insert(block_size(block), &fl, &sl);
    remove_free_block(control, block, fl, sl);
+
    tlsf_cast(control_t*, tlsf)->total_size -= block_size(block);
}

...
@@ -901,7 +1164,7 @@ int test_ffs_fls()

    if (rv)
    {
-        printf("tlsf_create: %x ffs/fls tests failed!\n", rv);
+        printf("test_ffs_fls: %x ffs/fls tests failed!\n", rv);
    }
    return rv;
}
...
@@ -924,12 +1187,14 @@ tlsf_t tlsf_create(void* mem)
    }

    control_construct(tlsf_cast(control_t*, mem));
+
    tlsf_cast(control_t*, mem)->real_used = tlsf_size();
    tlsf_cast(control_t*, mem)->max_used = tlsf_size();
    tlsf_cast(control_t*, mem)->allocated = 0;
    tlsf_cast(control_t*, mem)->total_size = tlsf_size();
    tlsf_cast(control_t*, mem)->fragments = 0;
    tlsf_cast(control_t*, mem)->max_fragments = 0;
+
    return tlsf_cast(tlsf_t, mem);
}

...
@@ -996,6 +1261,73 @@ void* tlsf_mallocxz(tlsf_t tlsf, size_t size)
}


+#ifdef DBG_TLSF_MALLOC
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size,
+        const char *file, const char *function, unsigned int line, const char *mname)
+#else
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
+#endif
+{
+    control_t* control = tlsf_cast(control_t*, tlsf);
+    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+
+    /*
+    ** We must allocate an additional minimum block size bytes so that if
+    ** our free block will leave an alignment gap which is smaller, we can
+    ** trim a leading free block and release it back to the pool. We must
+    ** do this because the previous physical block is in use, therefore
+    ** the prev_phys_block field is not valid, and we can't simply adjust
+    ** the size of that block.
+    */
+    const size_t gap_minimum = sizeof(block_header_t);
+    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);
+
+    /*
+    ** If alignment is less than or equals base alignment, we're done.
+    ** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
+    */
+    const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
+
+    block_header_t* block = block_locate_free(control, aligned_size);
+
+    /* This can't be a static assert. */
+    tlsf_assert(sizeof(block_header_t) == block_size_min + block_header_overhead);
+
+    if (block)
+    {
+        void* ptr = block_to_ptr(block);
+        void* aligned = align_ptr(ptr, align);
+        size_t gap = tlsf_cast(size_t,
+            tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
+
+        /* If gap size is too small, offset to next aligned boundary. */
+        if (gap && gap < gap_minimum)
+        {
+            const size_t gap_remain = gap_minimum - gap;
+            const size_t offset = tlsf_max(gap_remain, align);
+            const void* next_aligned = tlsf_cast(void*,
+                tlsf_cast(tlsfptr_t, aligned) + offset);
+
+            aligned = align_ptr(next_aligned, align);
+            gap = tlsf_cast(size_t,
+                tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
+        }
+
+        if (gap)
+        {
+            tlsf_assert(gap >= gap_minimum && "gap size too small");
+            block = block_trim_free_leading(control, block, gap);
+        }
+    }
+
+#ifdef DBG_TLSF_MALLOC
+    return block_prepare_used(control, block, adjust, file, function, line, mname);
+#else
+    return block_prepare_used(control, block, adjust);
+#endif
+}
+
+
#ifdef DBG_TLSF_MALLOC
void tlsf_free(tlsf_t tlsf, void* ptr,
        const char *file, const char *function, unsigned int line, const char *mname)
...
@@ -1011,6 +1343,8 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
    {
        control_t* control = tlsf_cast(control_t*, tlsf);
        block_header_t* block = block_from_ptr(ptr);
+
+        /* tlsf_assert(!block_is_free(block) && "block already marked as free"); */
        if (block_is_free(block)) {
            LOG(L_CRIT, "BUG: tlsf_free: freeing already freed pointer (%p)"
#ifdef DBG_TLSF_MALLOC
...
@@ -1028,6 +1362,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
            }

        }
+
        control->allocated -= block_size(block);
        control->real_used -= (block_size(block) + (ptr - (void *)block
                /* prev_phys_block is melted in the previous block when the current block is used */
...
@@ -1038,13 +1373,15 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
        block->alloc_info.line = line;
        block->alloc_info.mname = mname;
#endif
+
        block_mark_as_free(block);
        block = block_merge_prev(control, block);
        block = block_merge_next(control, block);
        block_insert(control, block);
    } else {
#ifdef DBG_TLSF_MALLOC
-        LOG(L_WARN, "tlsf_free: free(0) called from %s: %s(%u)\n", file, function, line);
+        LOG(L_WARN, "tlsf_free: free(0) called from %s: %s(%u)\n",
+                file, function, line);
#else
        LOG(L_WARN, "tlsf_free: free(0) called\n");
#endif
...
@@ -1129,6 +1466,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
    {
        control->allocated -= block_size(block);
        control->real_used -= block_size(block);
+
        /* Do we need to expand to the next block? */
        if (adjust > cursize)
        {
...
@@ -1139,6 +1477,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
        /* Trim the resulting block and return the original pointer. */
        block_trim_used(control, block, adjust);
        p = ptr;
+
        control->allocated +=block_size(block);
        TLSF_INCREASE_REAL_USED(control, block_size(block));
    }
...
@@ -1174,6 +1513,14 @@ void* tlsf_reallocxf(tlsf_t tlsf, void* ptr, size_t size)
    return r;
}

+/* end of included tlsf.c from original project
+ * - includes the kamailio mallocxz and reallocxf specific functions */
+
+/* Kamailio memory API functions */
+
+/**
+ *
+ */
void tlsf_meminfo(tlsf_t pool, struct mem_info *info)
{
    control_t* control = tlsf_cast(control_t*, pool);
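
Note on the bit-scan convention (an illustrative aside, not part of the synced diff): the code above requires tlsf_ffs()/tlsf_fls() to return a 0-based bit index in the range 0..31 and -1 for a zero word, unlike the libc ffs() convention of 1..32 with 0 meaning "no bit set". Below is a minimal standalone C sketch that reuses the same algorithm as the generic tlsf_fls_generic() fallback shown in the diff; the names fls_generic and main are only for this example and can be used to sanity-check a port.

    #include <stdio.h>

    /* Same algorithm as the tlsf_fls_generic() fallback in the diff above. */
    static int fls_generic(unsigned int word)
    {
        int bit = 32;
        if (!word) bit -= 1;
        if (!(word & 0xffff0000)) { word <<= 16; bit -= 16; }
        if (!(word & 0xff000000)) { word <<= 8; bit -= 8; }
        if (!(word & 0xf0000000)) { word <<= 4; bit -= 4; }
        if (!(word & 0xc0000000)) { word <<= 2; bit -= 2; }
        if (!(word & 0x80000000)) { word <<= 1; bit -= 1; }
        return bit;
    }

    int main(void)
    {
        unsigned int w = 0x60000000u;

        /* fls: index of the highest set bit; -1 when no bit is set */
        printf("%d\n", fls_generic(0) - 1);           /* -1 */
        printf("%d\n", fls_generic(1) - 1);           /*  0 */
        printf("%d\n", fls_generic(0x80000000u) - 1); /* 31 */
        /* ffs is derived from fls by isolating the lowest set bit */
        printf("%d\n", fls_generic(w & (~w + 1)) - 1); /* 29 */
        return 0;
    }
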
...
@@ -4,18 +4,40 @@
#if defined(TLSF_MALLOC)

/*
-** Two Level Segregated Fit memory allocator, version 3.0.
-** Written by Matthew Conte, and placed in the Public Domain.
+** Two Level Segregated Fit memory allocator, version 3.1.
+** Written by Matthew Conte
** http://tlsf.baisoku.org
**
** Based on the original documentation by Miguel Masmano:
-** http://rtportal.upv.es/rtmalloc/allocators/tlsf/index.shtml
-**
-** Please see the accompanying Readme.txt for implementation
-** notes and caveats.
+** http://www.gii.upv.es/tlsf/main/docs
**
** This implementation was written to the specification
** of the document, therefore no GPL restrictions apply.
+**
+** Copyright (c) 2006-2016, Matthew Conte
+** All rights reserved.
+**
+** Redistribution and use in source and binary forms, with or without
+** modification, are permitted provided that the following conditions are met:
+**     * Redistributions of source code must retain the above copyright
+**       notice, this list of conditions and the following disclaimer.
+**     * Redistributions in binary form must reproduce the above copyright
+**       notice, this list of conditions and the following disclaimer in the
+**       documentation and/or other materials provided with the distribution.
+**     * Neither the name of the copyright holder nor the
+**       names of its contributors may be used to endorse or promote products
+**       derived from this software without specific prior written permission.
+**
+** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+** DISCLAIMED. IN NO EVENT SHALL MATTHEW CONTE BE LIABLE FOR ANY
+** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <stddef.h>
...
@@ -50,6 +72,8 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size,
        const char *file, const char *function, unsigned int line, const char *mname);
void* tlsf_mallocxz(tlsf_t tlsf, size_t size,
        const char *file, const char *function, unsigned int line, const char *mname);
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size,
+        const char *file, const char *function, unsigned int line, const char *mname);
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size,
        const char *file, const char *function, unsigned int line, const char *mname);
void* tlsf_reallocxf(tlsf_t tlsf, void* ptr, size_t size,
...
@@ -59,6 +83,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr,
#else
void* tlsf_malloc(tlsf_t tlsf, size_t bytes);
void* tlsf_mallocxz(tlsf_t tlsf, size_t bytes);
+void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t bytes);
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
void* tlsf_reallocxf(tlsf_t tlsf, void* ptr, size_t size);
void tlsf_free(tlsf_t tlsf, void* ptr);
...
@@ -68,12 +93,12 @@ void tlsf_free(tlsf_t tlsf, void* ptr);
size_t tlsf_block_size(void* ptr);

/* Overheads/limits of internal structures. */
-size_t tlsf_size();
-size_t tlsf_align_size();
-size_t tlsf_block_size_min();
-size_t tlsf_block_size_max();
-size_t tlsf_pool_overhead();
-size_t tlsf_alloc_overhead();
+size_t tlsf_size(void);
+size_t tlsf_align_size(void);
+size_t tlsf_block_size_min(void);
+size_t tlsf_block_size_max(void);
+size_t tlsf_pool_overhead(void);
+size_t tlsf_alloc_overhead(void);

/* Debugging. */
typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
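
Usage illustration (hypothetical, not part of Kamailio or of this diff): the header changes above declare each allocator entry point in two variants, a debug one taking the caller's file, function, line and module name (stored in the block's alloc_info for debugging) and a plain one without those parameters. A caller could select between the two families with a wrapper like the sketch below; the macro name example_memalign and the "example" module string are made up for this illustration.

    /* Hypothetical convenience wrapper around the prototypes declared above. */
    #ifdef DBG_TLSF_MALLOC
    #define example_memalign(pool, align, size) \
        tlsf_memalign((pool), (align), (size), __FILE__, __func__, __LINE__, "example")
    #else
    #define example_memalign(pool, align, size) \
        tlsf_memalign((pool), (align), (size))
    #endif
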