Browse code

core/mem: f_malloc updates for pool type and refactoring of parts

- removed the define guarding statistics; it had been always enabled for the past
several years - the code is cleaner without it
- total free fragments is kept in root memory block to avoid computing
it at runtime by walking hash slots
- attempt to join fragments when a malloc fails, to reuse existing
fragments - should help when mem_join is 0: allocation stays very fast
until an out-of-memory condition, at which point the join attempt is made
- pkg stats updated to use the single event callback at the end of
malloc, free or realloc operations

Daniel-Constantin Mierla authored on 03/05/2014 18:10:00
Showing 2 changed files
... ...
@@ -190,6 +190,35 @@ inline static int fm_bmp_first_set(struct fm_block* qm, int start)
190 190
 #endif
191 191
 
192 192
 
193
+/**
194
+ * \brief Extract memory fragment from free list
195
+ * \param qm memory block
196
+ * \param frag memory fragment
197
+ */
198
+static inline void fm_extract_free(struct fm_block* qm, struct fm_frag* frag)
199
+{
200
+	struct fm_frag** pf;
201
+	int hash;
202
+
203
+	pf = frag->prv_free;
204
+	hash = GET_HASH(frag->size);
205
+
206
+	*pf=frag->u.nxt_free;
207
+
208
+	if(frag->u.nxt_free) frag->u.nxt_free->prv_free = pf;
209
+
210
+	qm->ffrags--;
211
+	qm->free_hash[hash].no--;
212
+#ifdef F_MALLOC_HASH_BITMAP
213
+	if (qm->free_hash[hash].no==0)
214
+		fm_bmp_reset(qm, hash);
215
+#endif /* F_MALLOC_HASH_BITMAP */
216
+	frag->prv_free = NULL;
217
+
218
+	qm->real_used+=frag->size;
219
+	qm->used+=frag->size;
220
+}
221
+
193 222
 /**
194 223
  * \brief Insert a memory fragment in a memory block
195 224
  * \param qm memory block
... ...
@@ -215,10 +244,13 @@ static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
215 244
 	frag->u.nxt_free=*f;
216 245
 	if (*f) (*f)->prv_free = &(frag->u.nxt_free);
217 246
 	*f=frag;
247
+	qm->ffrags++;
218 248
 	qm->free_hash[hash].no++;
219 249
 #ifdef F_MALLOC_HASH_BITMAP
220 250
 	fm_bmp_set(qm, hash);
221 251
 #endif /* F_MALLOC_HASH_BITMAP */
252
+	qm->used-=frag->size;
253
+	qm->real_used-=frag->size;
222 254
 }
223 255
 
224 256
 
... ...
@@ -255,16 +287,12 @@ void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
255 287
 		n=FRAG_NEXT(frag);
256 288
 		n->size=rest-FRAG_OVERHEAD;
257 289
 		FRAG_CLEAR_USED(n); /* never used */
258
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
290
+		/* new frag overhead */
259 291
 		qm->real_used+=FRAG_OVERHEAD;
260
-#ifdef MALLOC_STATS
261
-		sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
262
-#endif
263
-#endif
264 292
 #ifdef DBG_F_MALLOC
265 293
 		/* frag created by malloc, mark it*/
266 294
 		n->file=file;
267
-		n->func="frag. from fm_malloc";
295
+		n->func="frag. from fm_split_frag";
268 296
 		n->line=line;
269 297
 		n->check=ST_CHECK_PATTERN;
270 298
 #endif
... ...
@@ -282,7 +310,7 @@ void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,
282 310
  * \param size Size of allocation
283 311
  * \return return the fm_block
284 312
  */
285
-struct fm_block* fm_malloc_init(char* address, unsigned long size)
313
+struct fm_block* fm_malloc_init(char* address, unsigned long size, int type)
286 314
 {
287 315
 	char* start;
288 316
 	char* end;
... ...
@@ -314,10 +342,10 @@ struct fm_block* fm_malloc_init(char* address, unsigned long size)
314 342
 	qm=(struct fm_block*)start;
315 343
 	memset(qm, 0, sizeof(struct fm_block));
316 344
 	qm->size=size;
317
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
318
-	qm->real_used=init_overhead;
319
-	qm->max_real_used=qm->real_used;
320
-#endif
345
+	qm->used = size;
346
+	qm->real_used=size + init_overhead;
347
+	qm->max_real_used=init_overhead;
348
+	qm->type = type;
321 349
 	size-=init_overhead;
322 350
 	
323 351
 	qm->first_frag=(struct fm_frag*)(start+ROUNDUP(sizeof(struct fm_block)));
... ...
@@ -337,11 +365,49 @@ struct fm_block* fm_malloc_init(char* address, unsigned long size)
337 365
 	/* link initial fragment into the free list*/
338 366
 	
339 367
 	fm_insert_free(qm, qm->first_frag);
340
-	
341
-	
368
+
342 369
 	return qm;
343 370
 }
344 371
 
372
+/**
373
+ * \brief Try merging free fragments to fit requested size
374
+ * \param qm memory block
375
+ * \param size memory allocation size
376
+ * \return address of allocated memory
377
+ */
378
+struct fm_frag* fm_search_defrag(struct fm_block* qm, unsigned long size)
379
+{
380
+	struct fm_frag* frag;
381
+	struct fm_frag* nxt;
382
+
383
+	frag = qm->first_frag;
384
+	while((char*)frag < (char*)qm->last_frag) {
385
+		nxt = FRAG_NEXT(frag);
386
+
387
+		if ( ((char*)nxt < (char*)qm->last_frag) && frag->prv_free
388
+				&& nxt->prv_free) {
389
+			/* join frag + nxt */
390
+			fm_extract_free(qm, frag);
391
+			do {
392
+				fm_extract_free(qm, nxt);
393
+				frag->size += nxt->size + FRAG_OVERHEAD;
394
+
395
+				/* join - one frag less, remove overhead */
396
+				qm->real_used -= FRAG_OVERHEAD;
397
+
398
+				if( frag->size >size )
399
+					return frag;
400
+
401
+				nxt = FRAG_NEXT(frag);
402
+			} while (((char*)nxt < (char*)qm->last_frag) && nxt->prv_free);
403
+
404
+			fm_insert_free(qm, frag);
405
+		}
406
+		frag = nxt;
407
+	}
408
+
409
+	return 0;
410
+}
345 411
 
346 412
 /**
347 413
  * \brief Main memory manager allocation function
... ...
@@ -370,8 +436,6 @@ void* fm_malloc(struct fm_block* qm, unsigned long size)
370 436
 	if(unlikely(size==0)) size=4;
371 437
 	/*size must be a multiple of 8*/
372 438
 	size=ROUNDUP(size);
373
-/*	if (size>(qm->size-qm->real_used)) return 0; */
374
-
375 439
 	
376 440
 	/*search for a suitable free frag*/
377 441
 
... ...
@@ -407,57 +471,49 @@ void* fm_malloc(struct fm_block* qm, unsigned long size)
407 471
 #else /* F_MALLOC_HASH_BITMAP */
408 472
 	for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){
409 473
 		f=&(qm->free_hash[hash].first);
410
-#if 0
411
-		if (likely(hash<=F_MALLOC_OPTIMIZE/ROUNDTO)) /* return first match */
412
-				goto found; 
413
-#endif
414 474
 		for(;(*f); f=&((*f)->u.nxt_free))
415 475
 			if ((*f)->size>=size) goto found;
416 476
 		/* try in a bigger bucket */
417 477
 	}
418 478
 #endif /* F_MALLOC_HASH_BITMAP */
419
-	/* not found, bad! */
479
+	/* not found, search by defrag */
480
+
481
+	frag = fm_search_defrag(qm, size);
482
+
483
+	if(frag) goto finish;
484
+
420 485
 	return 0;
421 486
 
422 487
 found:
423 488
 	/* we found it!*/
424 489
 	/* detach it from the free list*/
425 490
 	frag=*f;
426
-	if(frag->u.nxt_free) frag->u.nxt_free->prv_free = frag->prv_free;
427
-	*f=frag->u.nxt_free;
428
-	frag->u.nxt_free=0; /* mark it as 'taken' */
429
-	frag->prv_free=0;
430
-	qm->free_hash[hash].no--;
431
-#ifdef F_MALLOC_HASH_BITMAP
432
-	if (qm->free_hash[hash].no==0)
433
-		fm_bmp_reset(qm, hash);
434
-#endif /* F_MALLOC_HASH_BITMAP */
435
-	
436
-	/*see if we'll use full frag, or we'll split it in 2*/
437
-	
491
+	fm_extract_free(qm, frag);
492
+
493
+	/*see if use full frag or split it in two*/
438 494
 #ifdef DBG_F_MALLOC
439 495
 	fm_split_frag(qm, frag, size, file, func, line);
496
+#else
497
+	fm_split_frag(qm, frag, size);
498
+#endif
499
+
500
+finish:
440 501
 
502
+#ifdef DBG_F_MALLOC
441 503
 	frag->file=file;
442 504
 	frag->func=func;
443 505
 	frag->line=line;
444 506
 	frag->check=ST_CHECK_PATTERN;
445 507
 	MDBG("fm_malloc(%p, %lu) returns address %p \n", qm, size,
446 508
 		(char*)frag+sizeof(struct fm_frag));
447
-#else
448
-	fm_split_frag(qm, frag, size);
449 509
 #endif
450
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
451
-	qm->real_used+=frag->size;
452
-	qm->used+=frag->size;
510
+
453 511
 	if (qm->max_real_used<qm->real_used)
454 512
 		qm->max_real_used=qm->real_used;
455
-#ifdef MALLOC_STATS
456
-	sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
457
-	sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
458
-#endif
459
-#endif
460 513
 	FRAG_MARK_USED(frag); /* mark it as used */
514
+	if(qm->type==MEM_TYPE_PKG) {
515
+		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
516
+	}
461 517
 	return (char*)frag+sizeof(struct fm_frag);
462 518
 }
463 519
 
... ...
@@ -474,7 +530,7 @@ static void fm_join_frag(struct fm_block* qm, struct fm_frag* f)
474 530
 
475 531
 	n=FRAG_NEXT(f);
476 532
 	/* check if valid and if in free list */
477
-	if (((char*)n >= (char*)qm->last_frag) || (n->u.nxt_free==NULL))
533
+	if (((char*)n >= (char*)qm->last_frag) || (n->prv_free==NULL))
478 534
 		return;
479 535
 
480 536
 	/* detach n from the free list */
... ...
@@ -488,6 +544,7 @@ static void fm_join_frag(struct fm_block* qm, struct fm_frag* f)
488 544
 	/* detach */
489 545
 	*pf=n->u.nxt_free;
490 546
 	if(n->u.nxt_free) n->u.nxt_free->prv_free = pf;
547
+	qm->ffrags--;
491 548
 	qm->free_hash[hash].no--;
492 549
 #ifdef F_MALLOC_HASH_BITMAP
493 550
 	if (qm->free_hash[hash].no==0)
... ...
@@ -495,12 +552,7 @@ static void fm_join_frag(struct fm_block* qm, struct fm_frag* f)
495 552
 #endif /* F_MALLOC_HASH_BITMAP */
496 553
 	/* join */
497 554
 	f->size+=n->size+FRAG_OVERHEAD;
498
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
499 555
 	qm->real_used-=FRAG_OVERHEAD;
500
-#ifdef MALLOC_STATS
501
-	sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
502
-#endif /* MALLOC_STATS */
503
-#endif /* DBG_F_MALLOC || MALLOC_STATS*/
504 556
 }
505 557
 #endif /*MEM_JOIN_FREE*/
506 558
 
... ...
@@ -519,7 +571,6 @@ void fm_free(struct fm_block* qm, void* p)
519 571
 #endif
520 572
 {
521 573
 	struct fm_frag* f;
522
-	unsigned long size;
523 574
 
524 575
 #ifdef DBG_F_MALLOC
525 576
 	MDBG("fm_free(%p, %p), called from %s: %s(%d)\n", qm, p, file, func, line);
... ...
@@ -543,20 +594,14 @@ void fm_free(struct fm_block* qm, void* p)
543 594
 	MDBG("fm_free: freeing block alloc'ed from %s: %s(%ld)\n",
544 595
 			f->file, f->func, f->line);
545 596
 #endif
546
-	if(unlikely(f->u.nxt_free!=NULL)) {
597
+	if(unlikely(f->prv_free!=NULL)) {
547 598
 		LM_INFO("freeing a free fragment (%p/%p) - ignore\n",
548 599
 				f, p);
549 600
 		return;
550 601
 	}
551
-	size=f->size;
552
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
553
-	qm->used-=size;
554
-	qm->real_used-=size;
555
-#ifdef MALLOC_STATS
556
-	sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
557
-	sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
558
-#endif
559
-#endif
602
+	if(qm->type==MEM_TYPE_PKG) {
603
+		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
604
+	}
560 605
 #ifdef DBG_F_MALLOC
561 606
 	f->file=file;
562 607
 	f->func=func;
... ...
@@ -633,16 +678,10 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
633 678
 #else
634 679
 		fm_split_frag(qm, f, size);
635 680
 #endif
636
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
637 681
 		/* fm_split frag already adds FRAG_OVERHEAD for the newly created
638 682
 		   free frag, so here we only need orig_size-f->size for real used */
639 683
 		qm->real_used-=(orig_size-f->size);
640 684
 		qm->used-=(orig_size-f->size);
641
-#ifdef MALLOC_STATS
642
-		sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
643
-		sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
644
-#endif
645
-#endif
646 685
 	}else if (f->size<size){
647 686
 		/* grow */
648 687
 #ifdef DBG_F_MALLOC
... ...
@@ -651,7 +690,7 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
651 690
 		diff=size-f->size;
652 691
 		n=FRAG_NEXT(f);
653 692
 		if (((char*)n < (char*)qm->last_frag) && 
654
-				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
693
+				(n->prv_free) && ((n->size+FRAG_OVERHEAD)>=diff)){
655 694
 			/* join  */
656 695
 			/* detach n from the free list */
657 696
 			hash=GET_HASH(n->size);
... ...
@@ -665,6 +704,7 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
665 704
 			/* detach */
666 705
 			*pf=n->u.nxt_free;
667 706
 			if(n->u.nxt_free) n->u.nxt_free->prv_free = pf;
707
+			qm->ffrags--;
668 708
 			qm->free_hash[hash].no--;
669 709
 #ifdef F_MALLOC_HASH_BITMAP
670 710
 			if (qm->free_hash[hash].no==0)
... ...
@@ -672,12 +712,8 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
672 712
 #endif /* F_MALLOC_HASH_BITMAP */
673 713
 			/* join */
674 714
 			f->size+=n->size+FRAG_OVERHEAD;
675
-		#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
676 715
 			qm->real_used-=FRAG_OVERHEAD;
677
-#ifdef MALLOC_STATS
678
-			sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
679
-#endif
680
-		#endif
716
+
681 717
 			/* split it if necessary */
682 718
 			if (f->size > size){
683 719
 		#ifdef DBG_F_MALLOC
... ...
@@ -687,14 +723,8 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
687 723
 				fm_split_frag(qm, f, size);
688 724
 		#endif
689 725
 			}
690
-		#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
691 726
 			qm->real_used+=(f->size-orig_size);
692 727
 			qm->used+=(f->size-orig_size);
693
-#ifdef MALLOC_STATS
694
-			sr_event_exec(SREV_PKG_SET_USED, (void*)qm->used);
695
-			sr_event_exec(SREV_PKG_SET_REAL_USED, (void*)qm->real_used);
696
-#endif
697
-		#endif
698 728
 		}else{
699 729
 			/* could not join => realloc */
700 730
 	#ifdef DBG_F_MALLOC
... ...
@@ -723,6 +753,9 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned long size)
723 753
 #ifdef DBG_F_MALLOC
724 754
 	MDBG("fm_realloc: returning %p\n", p);
725 755
 #endif
756
+	if(qm->type==MEM_TYPE_PKG) {
757
+		sr_event_exec(SREV_PKG_UPDATE_STATS, 0);
758
+	}
726 759
 	return p;
727 760
 }
728 761
 
... ...
@@ -831,43 +864,14 @@ void fm_status(struct fm_block* qm)
831 864
  */
832 865
 void fm_info(struct fm_block* qm, struct mem_info* info)
833 866
 {
834
-	int r;
835
-	long total_frags;
836
-#if !defined(DBG_F_MALLOC) && !defined(MALLOC_STATS)
837
-	struct fm_frag* f;
838
-#endif
839
-	
840 867
 	memset(info,0, sizeof(*info));
841
-	total_frags=0;
842 868
 	info->total_size=qm->size;
843 869
 	info->min_frag=MIN_FRAG_SIZE;
844
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
845 870
 	info->free=qm->size-qm->real_used;
846 871
 	info->used=qm->used;
847 872
 	info->real_used=qm->real_used;
848 873
 	info->max_used=qm->max_real_used;
849
-	for(r=0;r<F_HASH_SIZE; r++){
850
-		total_frags+=qm->free_hash[r].no;
851
-	}
852
-#else
853
-	/* we'll have to compute it all */
854
-	for (r=0; r<=F_MALLOC_OPTIMIZE/ROUNDTO; r++){
855
-		info->free+=qm->free_hash[r].no*UN_HASH(r);
856
-		total_frags+=qm->free_hash[r].no;
857
-	}
858
-	for(;r<F_HASH_SIZE; r++){
859
-		total_frags+=qm->free_hash[r].no;
860
-		for(f=qm->free_hash[r].first;f;f=f->u.nxt_free){
861
-			info->free+=f->size;
862
-		}
863
-	}
864
-	info->real_used=info->total_size-info->free;
865
-	info->used=0; /* we don't really now */
866
-	info->used=info->real_used-total_frags*FRAG_OVERHEAD-INIT_OVERHEAD-
867
-					FRAG_OVERHEAD;
868
-	info->max_used=0; /* we don't really now */
869
-#endif
870
-	info->total_frags=total_frags;
874
+	info->total_frags=qm->ffrags;
871 875
 }
872 876
 
873 877
 
... ...
@@ -879,14 +883,7 @@ void fm_info(struct fm_block* qm, struct mem_info* info)
879 883
  */
880 884
 unsigned long fm_available(struct fm_block* qm)
881 885
 {
882
-
883
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
884 886
 	return qm->size-qm->real_used;
885
-#else
886
-	/* we don't know how much free memory we have and it's to expensive
887
-	 * to compute it */
888
-	return ((unsigned long)-1);
889
-#endif
890 887
 }
891 888
 
892 889
 
... ...
@@ -931,8 +928,7 @@ make_new:
931 928
 void fm_sums(struct fm_block* qm)
932 929
 {
933 930
 	struct fm_frag* f;
934
-	struct fm_frag* free_frag;
935
-	int i, hash;
931
+	int i;
936 932
 	int memlog;
937 933
 	mem_counter *root,*x;
938 934
 	
... ...
@@ -945,19 +941,10 @@ void fm_sums(struct fm_block* qm)
945 941
 	
946 942
 	for (f=qm->first_frag, i=0; (char*)f<(char*)qm->last_frag;
947 943
 			f=FRAG_NEXT(f), i++){
948
-		if (f->u.nxt_free==0){
949
-			/* it might be in-use or the last free fragm. in a free list 
950
-			   => search the free frags of the same size for a possible
951
-			   match --andrei*/
952
-			hash=GET_HASH(f->size);
953
-			for(free_frag=qm->free_hash[hash].first;
954
-					free_frag && (free_frag!=f);
955
-					free_frag=free_frag->u.nxt_free);
956
-			if (free_frag==0){ /* not found among the free frag */
957
-				x = get_mem_counter(&root,f);
958
-				x->count++;
959
-				x->size+=f->size;
960
-			}
944
+		if (f->prv_free==0){
945
+			x = get_mem_counter(&root,f);
946
+			x->count++;
947
+			x->size+=f->size;
961 948
 		}
962 949
 	}
963 950
 	x = root;
... ...
@@ -115,12 +115,12 @@ struct fm_frag_lnk{
115 115
  * \see mem_info
116 116
  */
117 117
 struct fm_block{
118
+	int type;
118 119
 	unsigned long size; /** total size */
119
-#if defined(DBG_F_MALLOC) || defined(MALLOC_STATS)
120 120
 	unsigned long used; /** allocated size*/
121 121
 	unsigned long real_used; /** used + malloc overhead */
122 122
 	unsigned long max_real_used;
123
-#endif
123
+	unsigned long ffrags;
124 124
 	
125 125
 	struct fm_frag* first_frag;
126 126
 	struct fm_frag* last_frag;
... ...
@@ -137,7 +137,7 @@ struct fm_block{
137 137
  * \param size Size of allocation
138 138
  * \return return the fm_block
139 139
  */
140
-struct fm_block* fm_malloc_init(char* address, unsigned long size);
140
+struct fm_block* fm_malloc_init(char* address, unsigned long size, int type);
141 141
 
142 142
 
143 143
 /**