Browse code

mem/f_malloc: simplified free frags management

- avoid keeping address references to previous free fragments, as they
can get invalidated on joins
- straight doubly linked list also speeds join/defrag

Daniel-Constantin Mierla authored on 11/09/2015 14:32:32
Showing 2 changed files
... ...
@@ -91,7 +91,7 @@
91 91
 	((qm)->free_bitmap[(b)/FM_HASH_BMP_BITS] & (1UL<<((b)%FM_HASH_BMP_BITS)))
92 92
 
93 93
 
94
-
94
+#define fm_is_free(f) ((f)->u.nxt_free)
95 95
 /**
96 96
  * \brief Find the first free fragment in a memory block
97 97
  * 
... ...
@@ -173,38 +173,18 @@ inline static int fm_bmp_first_set(struct fm_block* qm, int start)
173 173
  */
174 174
 static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
175 175
 {
176
-	struct fm_frag** pf;
177 176
 	int hash;
178 177
 
179
-	pf = frag->prv_free;
180 178
 	hash = GET_HASH(frag->size);
181 179
 
182
-	if(unlikely(pf==0)) {
183
-		/* try to discover previous fragment (safety review) */
184
-		LM_WARN("missing prev info for fragment %p from %p [%d]\n",
185
-					frag, qm, hash);
186
-		if(likely(qm->free_hash[hash].first)) {
187
-			if(likely(qm->free_hash[hash].first==frag)) {
188
-				pf = &(qm->free_hash[hash].first);
189
-			} else {
190
-				for(pf=&(qm->free_hash[hash].first); (*pf); pf=&((*pf)->u.nxt_free)) {
191
-					if((*pf)->u.nxt_free==frag) {
192
-						break;
193
-					}
194
-				}
195
-			}
196
-		}
197
-		if(unlikely(pf==0)) {
198
-			LM_ALERT("attemting to extract inexistent fragment %p from %p [%d]\n",
199
-					frag, qm, hash);
200
-			return;
201
-		}
202
-		frag->prv_free = pf;
180
+	if(frag->prv_free) {
181
+		frag->prv_free->u.nxt_free = frag->u.nxt_free;
182
+	} else {
183
+		qm->free_hash[hash].first = frag->u.nxt_free;
184
+	}
185
+	if(frag->u.nxt_free && frag->u.nxt_free!=qm->last_frag) {
186
+		frag->u.nxt_free->prv_free = frag->prv_free;
203 187
 	}
204
-
205
-	*pf=frag->u.nxt_free;
206
-
207
-	if(frag->u.nxt_free) frag->u.nxt_free->prv_free = pf;
208 188
 
209 189
 	qm->ffrags--;
210 190
 	qm->free_hash[hash].no--;
... ...
@@ -213,6 +193,7 @@ static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
213 193
 		fm_bmp_reset(qm, hash);
214 194
 #endif /* F_MALLOC_HASH_BITMAP */
215 195
 	frag->prv_free = NULL;
196
+	frag->u.nxt_free = NULL;
216 197
 
217 198
 	qm->real_used+=frag->size;
218 199
 	qm->used+=frag->size;
... ...
@@ -225,24 +206,41 @@ static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
225 206
  */
226 207
 static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
227 208
 {
228
-	struct fm_frag** f;
209
+	struct fm_frag* f;
229 210
 	int hash;
230 211
 	
231 212
 	hash=GET_HASH(frag->size);
232
-	f=&(qm->free_hash[hash].first);
213
+	f=qm->free_hash[hash].first;
233 214
 	if (frag->size > F_MALLOC_OPTIMIZE){ /* because of '<=' in GET_HASH,
234 215
 											(different from 0.8.1[24] on
235 216
 											 purpose --andrei ) */
236
-		for(; *f; f=&((*f)->u.nxt_free)){
237
-			if (frag->size <= (*f)->size) break;
217
+		/* large fragments list -- add at a position ordered by size */
218
+		for(; f && f->u.nxt_free!=qm->last_frag; f=f->u.nxt_free){
219
+			if (frag->size <= f->size) break;
238 220
 		}
239
-	}
240 221
 	
241
-	/*insert it here*/
242
-	frag->prv_free = f;
243
-	frag->u.nxt_free=*f;
244
-	if (*f) (*f)->prv_free = &(frag->u.nxt_free);
245
-	*f=frag;
222
+		/*insert frag before f*/
223
+		frag->u.nxt_free = f;
224
+		if(f) {
225
+			frag->prv_free=f->prv_free;
226
+			if(f->prv_free) f->prv_free->u.nxt_free = frag;
227
+			if(qm->free_hash[hash].first==f) qm->free_hash[hash].first = frag;
228
+		} else {
229
+			/* to be only one in slot */
230
+			qm->free_hash[hash].first = frag;
231
+			frag->prv_free=0;
232
+		}
233
+	} else {
234
+		/* fixed fragment size list -- add first */
235
+		frag->prv_free=0;
236
+		if(f) {
237
+			f->prv_free = frag;
238
+			frag->u.nxt_free = f;
239
+		} else {
240
+			frag->u.nxt_free = qm->last_frag;
241
+		}
242
+		qm->free_hash[hash].first = frag;
243
+	}
246 244
 	qm->ffrags++;
247 245
 	qm->free_hash[hash].no++;
248 246
 #ifdef F_MALLOC_HASH_BITMAP
... ...
@@ -382,22 +380,23 @@ struct fm_frag* fm_search_defrag(struct fm_block* qm, unsigned long size)
382 380
 	while((char*)frag < (char*)qm->last_frag) {
383 381
 		nxt = FRAG_NEXT(frag);
384 382
 
385
-		if ( ((char*)nxt < (char*)qm->last_frag) && frag->prv_free
386
-				&& nxt->prv_free) {
387
-			/* join frag + nxt */
383
+		if ( ((char*)nxt < (char*)qm->last_frag) && fm_is_free(frag)
384
+				&& fm_is_free(nxt)) {
385
+			/* join frag with all next consecutive free frags */
388 386
 			fm_extract_free(qm, frag);
389 387
 			do {
390 388
 				fm_extract_free(qm, nxt);
391 389
 				frag->size += nxt->size + FRAG_OVERHEAD;
392 390
 
393
-				/* join - one frag less, add overhead to used */
391
+				/* after join - one frag less, add its overhead to used
392
 +				 * (real_used already has it - f and n were extracted) */
394 393
 				qm->used += FRAG_OVERHEAD;
395 394
 
396 395
 				if( frag->size >size )
397 396
 					return frag;
398 397
 
399 398
 				nxt = FRAG_NEXT(frag);
400
-			} while (((char*)nxt < (char*)qm->last_frag) && nxt->prv_free);
399
+			} while (((char*)nxt < (char*)qm->last_frag) && fm_is_free(nxt));
401 400
 
402 401
 			fm_insert_free(qm, frag);
403 402
 		}
... ...
@@ -518,40 +517,30 @@ finish:
518 517
 
519 518
 #ifdef MEM_JOIN_FREE
520 519
 /**
521
- * join fragment f with next one (if it is free)
520
+ * join fragment free frag f with next one (if it is free)
522 521
  */
523 522
 static void fm_join_frag(struct fm_block* qm, struct fm_frag* f)
524 523
 {
525 524
 	int hash;
526
-	struct fm_frag **pf;
527
-	struct fm_frag* n;
525
+	struct fm_frag *pf;
526
+	struct fm_frag *n;
528 527
 
529 528
 	n=FRAG_NEXT(f);
530
-	/* check if valid and if in free list */
531
-	if (((char*)n >= (char*)qm->last_frag) || (n->prv_free==NULL))
529
+
530
+	/* check if n is valid and if in free list */
531
+	if (((char*)n >= (char*)qm->last_frag) || !fm_is_free(n))
532 532
 		return;
533 533
 
534 534
 	/* detach n from the free list */
535
-	hash=GET_HASH(n->size);
536
-	pf=n->prv_free;
537
-	if (*pf==0){
538
-		/* not found, bad! */
539
-		LM_WARN("could not find %p in free list (hash=%ld)\n", n, GET_HASH(n->size));
540
-		return;
541
-	}
542
-	/* detach */
543
-	*pf=n->u.nxt_free;
544
-	if(n->u.nxt_free) n->u.nxt_free->prv_free = pf;
545
-	qm->ffrags--;
546
-	qm->free_hash[hash].no--;
547
-#ifdef F_MALLOC_HASH_BITMAP
548
-	if (qm->free_hash[hash].no==0)
549
-		fm_bmp_reset(qm, hash);
550
-#endif /* F_MALLOC_HASH_BITMAP */
551
-	/* join */
535
+	fm_extract_free(qm, n);
536
+
537
+	/* join - f extended with size of n plus its overhead */
552 538
 	f->size+=n->size+FRAG_OVERHEAD;
553
-	qm->real_used+=n->size;
554
-	qm->used+=n->size + FRAG_OVERHEAD;
539
+
540
+	/* after join - one frag less, add its overhead to used
541
 +	 * (real_used already has it - f and n were extracted) */
542
+	qm->used += FRAG_OVERHEAD;
543
+
555 544
 }
556 545
 #endif /*MEM_JOIN_FREE*/
557 546
 
... ...
@@ -593,7 +582,7 @@ void fm_free(struct fm_block* qm, void* p)
593 582
 	MDBG("fm_free: freeing block alloc'ed from %s: %s(%ld)\n",
594 583
 			f->file, f->func, f->line);
595 584
 #endif
596
-	if(unlikely(f->prv_free!=NULL)) {
585
+	if(unlikely(fm_is_free(f))) {
597 586
 		LM_INFO("freeing a free fragment (%p/%p) - ignore\n",
598 587
 				f, p);
599 588
 		return;
... ...
@@ -682,11 +671,12 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
682 671
 #endif
683 672
 		diff=size-f->size;
684 673
 		n=FRAG_NEXT(f);
674
+		/*if next frag is free, check if a join has enough size*/
685 675
 		if (((char*)n < (char*)qm->last_frag) && 
686
-				(n->prv_free) && ((n->size+FRAG_OVERHEAD)>=diff)){
676
+				fm_is_free(n) && ((n->size+FRAG_OVERHEAD)>=diff)){
687 677
 			/* detach n from the free list */
688 678
 			fm_extract_free(qm, n);
689
-			/* join  */
679
+			/* join */
690 680
 			f->size+=n->size+FRAG_OVERHEAD;
691 681
 			qm->used+=FRAG_OVERHEAD;
692 682
 
... ...
@@ -915,7 +905,7 @@ void fm_sums(struct fm_block* qm)
915 905
 	
916 906
 	for (f=qm->first_frag, i=0; (char*)f<(char*)qm->last_frag;
917 907
 			f=FRAG_NEXT(f), i++){
918
-		if (f->prv_free==0){
908
+		if (!fm_is_free(f)){
919 909
 			x = get_mem_counter(&root,f);
920 910
 			x->count++;
921 911
 			x->size+=f->size;
... ...
@@ -80,12 +80,13 @@ typedef unsigned long fm_hash_bitmap_t;
80 80
  * - +1 .... end -  size = 2^k, big buckets
81 81
  */
82 82
 struct fm_frag{
83
-	unsigned long size;
83
+	unsigned long size;           /* size of fragment */
84 84
 	union{
85
-		struct fm_frag* nxt_free;
85
 +		struct fm_frag* nxt_free; /* next free frag in slot, last points to qm last_frag,
 86
 +									used to detect if fragment is free (when not null) */
86 87
 		long reserved;
87 88
 	}u;
88
-	struct fm_frag** prv_free;
89
+	struct fm_frag* prv_free;     /* prev free frag in slot - for faster join/defrag */
89 90
 #ifdef DBG_F_MALLOC
90 91
 	const char* file;
91 92
 	const char* func;