Browse code

core: mem - reworked f_malloc free chunks management

- credits to Juha Heinanen for helping with testing

Daniel-Constantin Mierla authored on 25/09/2015 21:38:56
Showing 2 changed files
... ...
@@ -91,7 +91,8 @@
91 91
 	((qm)->free_bitmap[(b)/FM_HASH_BMP_BITS] & (1UL<<((b)%FM_HASH_BMP_BITS)))
92 92
 
93 93
 
94
-#define fm_is_free(f) ((f)->u.nxt_free)
94
+#define fm_is_free(f) ((f)->is_free)
95
+
95 96
 /**
96 97
  * \brief Find the first free fragment in a memory block
97 98
  * 
... ...
@@ -158,12 +159,10 @@ inline static int fm_bmp_first_set(struct fm_block* qm, int start)
158 159
  * \name Memory manager boundary check pattern
159 160
  */
160 161
 /*@{ */
161
-#ifdef DBG_F_MALLOC
162 162
 #define ST_CHECK_PATTERN   0xf0f0f0f0 /** inserted at the beginning */
163 163
 #define END_CHECK_PATTERN1 0xc0c0c0c0 /** inserted at the end       */
164 164
 #define END_CHECK_PATTERN2 0xabcdefed /** inserted at the end       */
165 165
 /*@} */
166
-#endif
167 166
 
168 167
 
169 168
 /**
... ...
@@ -177,26 +176,25 @@ static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
177 176
 
178 177
 	hash = GET_HASH(frag->size);
179 178
 
180
-	if(frag->prv_free) {
181
-		frag->prv_free->u.nxt_free = frag->u.nxt_free;
179
+	if(frag->prev_free) {
180
+		frag->prev_free->next_free = frag->next_free;
182 181
 	} else {
183
-		if(frag->u.nxt_free!=qm->last_frag)
184
-			qm->free_hash[hash].first = frag->u.nxt_free;
185
-		else
186
-			qm->free_hash[hash].first = NULL;
182
+		qm->free_hash[hash].first = frag->next_free;
187 183
 	}
188
-	if(frag->u.nxt_free && frag->u.nxt_free!=qm->last_frag) {
189
-		frag->u.nxt_free->prv_free = frag->prv_free;
184
+	if(frag->next_free) {
185
+		frag->next_free->prev_free = frag->prev_free;
190 186
 	}
191 187
 
188
+	frag->prev_free = NULL;
189
+	frag->next_free = NULL;
190
+	frag->is_free = 0;
191
+
192 192
 	qm->ffrags--;
193 193
 	qm->free_hash[hash].no--;
194 194
 #ifdef F_MALLOC_HASH_BITMAP
195 195
 	if (qm->free_hash[hash].no==0)
196 196
 		fm_bmp_reset(qm, hash);
197 197
 #endif /* F_MALLOC_HASH_BITMAP */
198
-	frag->prv_free = NULL;
199
-	frag->u.nxt_free = NULL;
200 198
 
201 199
 	qm->real_used+=frag->size;
202 200
 	qm->used+=frag->size;
... ...
@@ -210,55 +208,41 @@ static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
210 208
 static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
211 209
 {
212 210
 	struct fm_frag* f;
211
+	struct fm_frag* p;
213 212
 	int hash;
214
-	int after;
215 213
 	
216 214
 	hash=GET_HASH(frag->size);
217 215
 	f=qm->free_hash[hash].first;
216
+	p=NULL;
218 217
 	if (frag->size > F_MALLOC_OPTIMIZE){ /* because of '<=' in GET_HASH,
219 218
 											(different from 0.8.1[24] on
220 219
 											 purpose --andrei ) */
221
-		after = 0;
222 220
 		/* large fragments list -- add at a position ordered by size */
223
-		for(; f; f=f->u.nxt_free){
221
+		for(; f; f=f->next_free){
224 222
 			if (frag->size <= f->size) break;
225
-			if(f->u.nxt_free==qm->last_frag) {
226
-				/*size greater than last frag in slot*/
227
-				after = 1;
228
-				break;
229
-			}
223
+			p = f;
230 224
 		}
231 225
 	
226
+		frag->next_free = f;
227
+		frag->prev_free = p;
232 228
 		if(f) {
233
-			if(after) {
234
-				/*insert frag after f*/
235
-				frag->prv_free=f;
236
-				f->u.nxt_free=frag;
237
-				frag->u.nxt_free = qm->last_frag;
238
-			} else {
239
-				/*insert frag before f*/
240
-				frag->u.nxt_free = f;
241
-				frag->prv_free=f->prv_free;
242
-				if(f->prv_free) f->prv_free->u.nxt_free = frag;
243
-				if(qm->free_hash[hash].first==f) qm->free_hash[hash].first = frag;
244
-			}
229
+			f->prev_free = frag;
230
+		}
231
+		if(p) {
232
+			p->next_free = frag;
245 233
 		} else {
246
-			/* to be only one in slot */
247 234
 			qm->free_hash[hash].first = frag;
248
-			frag->prv_free=0;
249
-			frag->u.nxt_free = qm->last_frag;
250 235
 		}
251 236
 	} else {
252 237
 		/* fixed fragment size list -- add first */
253
-		frag->prv_free=0;
238
+		frag->prev_free = 0;
239
+		frag->next_free = f;
254 240
 		if(f) {
255
-			f->prv_free = frag;
256
-			frag->u.nxt_free = f;
257
-		} else {
258
-			frag->u.nxt_free = qm->last_frag;
241
+			f->prev_free = frag;
259 242
 		}
260 243
 		qm->free_hash[hash].first = frag;
261 244
 	}
245
+	frag->is_free = 1;
262 246
 	qm->ffrags++;
263 247
 	qm->free_hash[hash].no++;
264 248
 #ifdef F_MALLOC_HASH_BITMAP
... ...
@@ -307,8 +291,8 @@ void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
307 291
 		n->file=file;
308 292
 		n->func="frag. from fm_split_frag";
309 293
 		n->line=line;
310
-		n->check=ST_CHECK_PATTERN;
311 294
 #endif
295
+		n->check=ST_CHECK_PATTERN;
312 296
 		/* reinsert n in free list*/
313 297
 		qm->used-=FRAG_OVERHEAD;
314 298
 		fm_insert_free(qm, n);
... ...
@@ -366,17 +350,17 @@ struct fm_block* fm_malloc_init(char* address, unsigned long size, int type)
366 350
 	qm->last_frag=(struct fm_frag*)(end-sizeof(struct fm_frag));
367 351
 	/* init first fragment*/
368 352
 	qm->first_frag->size=size;
369
-	qm->first_frag->prv_free=0;
370
-	qm->first_frag->u.nxt_free=0;
353
+	qm->first_frag->prev_free=0;
354
+	qm->first_frag->next_free=0;
355
+	qm->first_frag->is_free=0;
371 356
 	/* init last fragment*/
372 357
 	qm->last_frag->size=0;
373
-	qm->last_frag->prv_free=0;
374
-	qm->last_frag->u.nxt_free=0;
358
+	qm->last_frag->prev_free=0;
359
+	qm->last_frag->next_free=0;
360
+	qm->last_frag->is_free=0;
375 361
 	
376
-#ifdef DBG_F_MALLOC
377 362
 	qm->first_frag->check=ST_CHECK_PATTERN;
378 363
 	qm->last_frag->check=END_CHECK_PATTERN1;
379
-#endif
380 364
 	
381 365
 	/* link initial fragment into the free list*/
382 366
 	
... ...
@@ -464,7 +448,7 @@ void* fm_malloc(void* qmp, unsigned long size)
464 448
 	if (likely(hash>=0)){
465 449
 		if (likely(hash<=F_MALLOC_OPTIMIZE/ROUNDTO)) { /* return first match */
466 450
 			f=qm->free_hash[hash].first;
467
-			if(likely(f && f!=qm->last_frag)) goto found;
451
+			if(likely(f)) goto found;
468 452
 #ifdef DBG_F_MALLOC
469 453
 			MDBG(" block %p hash %d empty but no. is %lu\n", qm,
470 454
 					hash, qm->free_hash[hash].no);
... ...
@@ -482,7 +466,7 @@ void* fm_malloc(void* qmp, unsigned long size)
482 466
 		   hash buckets.
483 467
 		*/
484 468
 		do {
485
-			for(f=qm->free_hash[hash].first; f && f!=qm->last_frag; f=f->u.nxt_free)
469
+			for(f=qm->free_hash[hash].first; f; f=f->next_free)
486 470
 				if (f->size>=size) goto found;
487 471
 			hash++; /* try in next hash cell */
488 472
 		}while((hash < F_HASH_SIZE) &&
... ...
@@ -491,7 +475,7 @@ void* fm_malloc(void* qmp, unsigned long size)
491 475
 #else /* F_MALLOC_HASH_BITMAP */
492 476
 	for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){
493 477
 		f=qm->free_hash[hash].first;
494
-		for(;f && f!=qm->last_frag; f=f->u.nxt_free)
478
+		for(; f; f=f->next_free)
495 479
 			if (f->size>=size) goto found;
496 480
 		/* try in a bigger bucket */
497 481
 	}
... ...
@@ -523,10 +507,10 @@ finish:
523 507
 	frag->file=file;
524 508
 	frag->func=func;
525 509
 	frag->line=line;
526
-	frag->check=ST_CHECK_PATTERN;
527 510
 	MDBG("fm_malloc(%p, %lu) returns address %p \n", qm, size,
528 511
 		(char*)frag+sizeof(struct fm_frag));
529 512
 #endif
513
+	frag->check=ST_CHECK_PATTERN;
530 514
 
531 515
 	if (qm->max_real_used<qm->real_used)
532 516
 		qm->max_real_used=qm->real_used;
... ...
@@ -804,7 +788,7 @@ void fm_status(void* qmp)
804 788
 	for(h=0,i=0,size=0;h<F_HASH_SIZE;h++){
805 789
 		unused=0;
806 790
 		for (f=qm->free_hash[h].first,j=0; f;
807
-				size+=f->size,f=f->u.nxt_free,i++,j++){
791
+				size+=f->size,f=f->next_free,i++,j++){
808 792
 			if (!FRAG_WAS_USED(f)){
809 793
 				unused++;
810 794
 #ifdef DBG_F_MALLOC
... ...
@@ -79,19 +79,16 @@ typedef unsigned long fm_hash_bitmap_t;
79 79
  * - +1 .... end -  size = 2^k, big buckets
80 80
  */
81 81
 struct fm_frag{
82
-	unsigned long size;           /* size of fragment */
83
-	union{
84
-		struct fm_frag* nxt_free; /* next free frag in slot, last points to qm last_frag,
85
-									used to detect if fragment is free (when not null) */
86
-		long reserved;
87
-	}u;
88
-	struct fm_frag* prv_free;     /* prev free frag in slot - for faster join/defrag */
82
+	unsigned long size;         /* size of fragment */
83
+	struct fm_frag* next_free;  /* next free frag in slot */
84
+	struct fm_frag* prev_free;  /* prev free frag in slot - for faster join/defrag */
85
+	unsigned int is_free;       /* used to detect if fragment is free (when not 0) */
89 86
 #ifdef DBG_F_MALLOC
90 87
 	const char* file;
91 88
 	const char* func;
92 89
 	unsigned long line;
93
-	unsigned long check;
94 90
 #endif
91
+	unsigned int check;
95 92
 };
96 93
 
97 94
 struct fm_frag_lnk{