Browse code

- malloc changes: fragment avoidance, bookkeeping, hooks for future full mem. defragmenter

Andrei Pelinescu-Onciul authored on 19/07/2004 13:45:50
Showing 5 changed files
... ...
@@ -45,7 +45,7 @@ export makefile_defs
45 45
 VERSION = 0
46 46
 PATCHLEVEL = 8
47 47
 SUBLEVEL =   13
48
-EXTRAVERSION = -dev-35
48
+EXTRAVERSION = -dev-36-malloc
49 49
 
50 50
 RELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
51 51
 OS = $(shell uname -s | sed -e s/SunOS/solaris/ | tr "[A-Z]" "[a-z]")
... ...
@@ -282,8 +282,8 @@ DEFS+= $(extra_defs) \
282 282
 	 -DUSE_TCP \
283 283
 	 -DDISABLE_NAGLE \
284 284
 	 -DF_MALLOC \
285
-	# -DDBG_F_MALLOC \
286 285
 	# -DDBG_QM_MALLOC \
286
+	# -DDBG_F_MALLOC \
287 287
 	 #-DF_MALLOC \
288 288
 	 #-DNO_DEBUG \
289 289
 	 #-DNO_LOG
... ...
@@ -29,6 +29,9 @@
29 29
  * --------
30 30
  *              created by andrei
31 31
  *  2003-07-06  added fm_realloc (andrei)
32
+ *  2004-07-19  fragments book keeping code and support for 64 bits
33
+ *               memory blocks (64 bits machine & size >=2^32) 
34
+ *              GET_HASH s/</<=/ (avoids waste of 1 hash cell)   (andrei)
32 35
  */
33 36
 
34 37
 
... ...
@@ -63,15 +66,26 @@
63 66
 
64 67
 
65 68
 	/* finds the hash value for s, s=ROUNDTO multiple*/
66
-#define GET_HASH(s)   ( ((s)<F_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
69
+#define GET_HASH(s)   ( ((s)<=F_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
67 70
 						F_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
68 71
 							F_MALLOC_OPTIMIZE_FACTOR+1 )
69 72
 
70
-#define UN_HASH(h)	( ((h)<(F_MALLOC_OPTIMIZE/ROUNDTO))?(h)*ROUNDTO: \
73
+#define UN_HASH(h)	( ((h)<=(F_MALLOC_OPTIMIZE/ROUNDTO))?(h)*ROUNDTO: \
71 74
 						1<<((h)-F_MALLOC_OPTIMIZE/ROUNDTO+\
72 75
 							F_MALLOC_OPTIMIZE_FACTOR-1)\
73 76
 					)
74 77
 
78
+/* mark/test used/unused frags */
79
+#define FRAG_MARK_USED(f)
80
+#define FRAG_CLEAR_USED(f)
81
+#define FRAG_WAS_USED(f)   (1)
82
+
83
+/* other frag related defines:
84
+ * MEM_COALESCE_FRAGS 
85
+ * MEM_FRAG_AVOIDANCE
86
+ */
87
+#define MEM_FRAG_AVOIDANCE
88
+
75 89
 
76 90
 /* computes hash number for big buckets*/
77 91
 inline static int big_hash_idx(int s)
... ...
@@ -81,7 +95,8 @@ inline static int big_hash_idx(int s)
81 95
 	 * index= i such that 2^i > s >= 2^(i-1)
82 96
 	 *
83 97
 	 * => index = number of the first non null bit in s*/
84
-	for (idx=31; !(s&0x80000000) ; s<<=1, idx--);
98
+	idx=sizeof(long)*8-1;
99
+	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
85 100
 	return idx;
86 101
 }
87 102
 
... ...
@@ -100,8 +115,10 @@ static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
100 115
 	int hash;
101 116
 	
102 117
 	hash=GET_HASH(frag->size);
103
-	f=&(qm->free_hash[hash]);
104
-	if (frag->size > F_MALLOC_OPTIMIZE){
118
+	f=&(qm->free_hash[hash].first);
119
+	if (frag->size > F_MALLOC_OPTIMIZE){ /* because of '<=' in GET_HASH,
120
+											(different from 0.8.1[24] on
121
+											 purpose --andrei ) */
105 122
 		for(; *f; f=&((*f)->u.nxt_free)){
106 123
 			if (frag->size <= (*f)->size) break;
107 124
 		}
... ...
@@ -110,6 +127,7 @@ static inline void fm_insert_free(struct fm_block* qm, struct fm_frag* frag)
110 127
 	/*insert it here*/
111 128
 	frag->u.nxt_free=*f;
112 129
 	*f=frag;
130
+	qm->free_hash[hash].no++;
113 131
 }
114 132
 
115 133
 
... ...
@@ -127,11 +145,17 @@ void fm_split_frag(struct fm_block* qm, struct fm_frag* frag,unsigned int size)
127 145
 	struct fm_frag* n;
128 146
 	
129 147
 	rest=frag->size-size;
148
+#ifdef MEM_FRAG_AVOIDANCE
149
+	if ((rest> (FRAG_OVERHEAD+F_MALLOC_OPTIMIZE))||
150
+		(rest>=(FRAG_OVERHEAD+size))){ /* the residue fragm. is big enough*/
151
+#else
130 152
 	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
153
+#endif
131 154
 		frag->size=size;
132 155
 		/*split the fragment*/
133 156
 		n=FRAG_NEXT(frag);
134 157
 		n->size=rest-FRAG_OVERHEAD;
158
+		FRAG_CLEAR_USED(n); /* never used */
135 159
 #ifdef DBG_F_MALLOC
136 160
 		qm->real_used+=FRAG_OVERHEAD;
137 161
 		/* frag created by malloc, mark it*/
... ...
@@ -226,7 +250,7 @@ void* fm_malloc(struct fm_block* qm, unsigned int size)
226 250
 	/*search for a suitable free frag*/
227 251
 
228 252
 	for(hash=GET_HASH(size);hash<F_HASH_SIZE;hash++){
229
-		f=&(qm->free_hash[hash]);
253
+		f=&(qm->free_hash[hash].first);
230 254
 		for(;(*f); f=&((*f)->u.nxt_free))
231 255
 			if ((*f)->size>=size) goto found;
232 256
 		/* try in a bigger bucket */
... ...
@@ -240,6 +264,7 @@ found:
240 264
 	frag=*f;
241 265
 	*f=frag->u.nxt_free;
242 266
 	frag->u.nxt_free=0; /* mark it as 'taken' */
267
+	qm->free_hash[hash].no--;
243 268
 	
244 269
 	/*see if we'll use full frag, or we'll split it in 2*/
245 270
 	
... ...
@@ -260,6 +285,7 @@ found:
260 285
 #else
261 286
 	fm_split_frag(qm, frag, size);
262 287
 #endif
288
+	FRAG_MARK_USED(frag); /* mark it as used */
263 289
 	return (char*)frag+sizeof(struct fm_frag);
264 290
 }
265 291
 
... ...
@@ -318,6 +344,7 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned int size)
318 344
 	unsigned int orig_size;
319 345
 	struct fm_frag *n;
320 346
 	void *ptr;
347
+	int hash;
321 348
 	
322 349
 #ifdef DBG_F_MALLOC
323 350
 	DBG("fm_realloc(%p, %p, %d) called from %s: %s(%d)\n", qm, p, size,
... ...
@@ -371,9 +398,10 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned int size)
371 398
 				(n->u.nxt_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
372 399
 			/* join  */
373 400
 			/* detach n from the free list */
374
-			pf=&(qm->free_hash[GET_HASH(n->size)]);
401
+			hash=GET_HASH(n->size);
402
+			pf=&(qm->free_hash[hash].first);
375 403
 			/* find it */
376
-			for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free));
404
+			for(;(*pf)&&(*pf!=n); pf=&((*pf)->u.nxt_free)); /*FIXME slow */
377 405
 			if (*pf==0){
378 406
 				/* not found, bad! */
379 407
 				LOG(L_CRIT, "BUG: fm_realloc: could not find %p in free "
... ...
@@ -382,6 +410,7 @@ void* fm_realloc(struct fm_block* qm, void* p, unsigned int size)
382 410
 			}
383 411
 			/* detach */
384 412
 			*pf=n->u.nxt_free;
413
+			qm->free_hash[hash].no--;
385 414
 			/* join */
386 415
 			f->size+=n->size+FRAG_OVERHEAD;
387 416
 		#ifdef DBG_F_MALLOC
... ...
@@ -436,6 +465,7 @@ void fm_status(struct fm_block* qm)
436 465
 	struct fm_frag* f;
437 466
 	int i,j;
438 467
 	int h;
468
+	int unused;
439 469
 	long size;
440 470
 
441 471
 	LOG(memlog, "fm_status (%p):\n", qm);
... ...
@@ -462,14 +492,29 @@ void fm_status(struct fm_block* qm)
462 492
 */
463 493
 	LOG(memlog, "dumping free list:\n");
464 494
 	for(h=0,i=0,size=0;h<F_HASH_SIZE;h++){
465
-		
466
-		for (f=qm->free_hash[h],j=0; f; size+=f->size,f=f->u.nxt_free,i++,j++);
467
-		if (j) LOG(memlog, "hash = %3d fragments no.: %5d,\n\t\t"
495
+		unused=0;
496
+		for (f=qm->free_hash[h].first,j=0; f;
497
+				size+=f->size,f=f->u.nxt_free,i++,j++){
498
+			if (!FRAG_WAS_USED(f)){
499
+				unused++;
500
+#ifdef DBG_FM_MALLOC
501
+				LOG(memlog, "unused fragm.: hash = %3d, fragment %x,"
502
+							" address %x size %d, created from %s: %s(%d)\n",
503
+						    h, f, (char*)f+sizeof(struct fm_frag), f->size,
504
+							f->file, f->func, f->line);
505
+#endif
506
+			};
507
+		}
508
+		if (j) LOG(memlog, "hash = %3d fragments no.: %5d, unused: %5d\n\t\t"
468 509
 							" bucket size: %9ld - %9ld (first %9ld)\n",
469
-							h, j, (long)UN_HASH(h),
470
-						(long)((h<F_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
471
-							qm->free_hash[h]->size
510
+							h, j, unused, (long)UN_HASH(h),
511
+						(long)((h<=F_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
512
+							qm->free_hash[h].first->size
472 513
 				);
514
+		if (j!=qm->free_hash[h].no){
515
+			LOG(L_CRIT, "BUG: fm_status: different free frag. count: %d!=%ld"
516
+					" for hash %3d\n", j, qm->free_hash[h].no, h);
517
+		}
473 518
 		/*
474 519
 		{
475 520
 			LOG(memlog, "   %5d.[%3d:%3d] %c  address=%x  size=%d(%x)\n",
... ...
@@ -30,6 +30,8 @@
30 30
  * --------
31 31
  *  2003-05-21  on sparc64 roundto 8 even in debugging mode (so malloc'ed
32 32
  *               long longs will be 64 bit aligned) (andrei)
33
+ *  2004-07-19  support for 64 bit (2^64 mem. block) and more info
34
+ *               for the future de-fragmentation support (andrei)
33 35
  */
34 36
 
35 37
 
... ...
@@ -60,14 +62,14 @@
60 62
 #define F_MALLOC_OPTIMIZE_FACTOR 11 /*used below */
61 63
 #define F_MALLOC_OPTIMIZE  (1<<F_MALLOC_OPTIMIZE_FACTOR)
62 64
 								/* size to optimize for,
63
-									(most allocs < this size),
65
+									(most allocs <= this size),
64 66
 									must be 2^k */
65 67
 
66 68
 #define F_HASH_SIZE (F_MALLOC_OPTIMIZE/ROUNDTO + \
67
-		(32-F_MALLOC_OPTIMIZE_FACTOR)+1)
69
+		(sizeof(long)*8-F_MALLOC_OPTIMIZE_FACTOR)+1)
68 70
 
69 71
 /* hash structure:
70
- * 0 .... F_MALLOC_OPTIMIE/ROUNDTO  - small buckets, size increases with
72
+ * 0 .... F_MALLOC_OPTIMIZE/ROUNDTO  - small buckets, size increases with
71 73
  *                            ROUNDTO from bucket to bucket
72 74
  * +1 .... end -  size = 2^k, big buckets */
73 75
 
... ...
@@ -85,6 +87,10 @@ struct fm_frag{
85 87
 #endif
86 88
 };
87 89
 
90
+struct fm_frag_lnk{
91
+	struct fm_frag* first;
92
+	unsigned long no;
93
+};
88 94
 
89 95
 struct fm_block{
90 96
 	unsigned long size; /* total size */
... ...
@@ -97,7 +103,7 @@ struct fm_block{
97 103
 	struct fm_frag* first_frag;
98 104
 	struct fm_frag* last_frag;
99 105
 	
100
-	struct fm_frag* free_hash[F_HASH_SIZE];
106
+	struct fm_frag_lnk free_hash[F_HASH_SIZE];
101 107
 };
102 108
 
103 109
 
... ...
@@ -30,6 +30,9 @@
30 30
  *  ????-??-??  created by andrei
31 31
  *  2003-04-14  more debugging added in DBG_QM_MALLOC mode (andrei)
32 32
  *  2003-06-29  added qm_realloc (andrei)
33
+ *  2004-07-19  fragments book keeping code and support for 64 bits
34
+ *               memory blocks (64 bits machine & size>=2^32) (andrei)
35
+ *              GET_HASH s/</<=/ (avoids waste of 1 hash cell) (andrei)
33 36
  */
34 37
 
35 38
 
... ...
@@ -77,10 +80,28 @@
77 80
 
78 81
 
79 82
 	/* finds the hash value for s, s=ROUNDTO multiple*/
80
-#define GET_HASH(s)   ( ((s)<QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
83
+#define GET_HASH(s)   ( ((s)<=QM_MALLOC_OPTIMIZE)?(s)/ROUNDTO: \
81 84
 						QM_MALLOC_OPTIMIZE/ROUNDTO+big_hash_idx((s))- \
82 85
 							QM_MALLOC_OPTIMIZE_FACTOR+1 )
83 86
 
87
+#define UN_HASH(h)	( ((h)<=(QM_MALLOC_OPTIMIZE/ROUNDTO))?(h)*ROUNDTO: \
88
+						1<<((h)-QM_MALLOC_OPTIMIZE/ROUNDTO+\
89
+							QM_MALLOC_OPTIMIZE_FACTOR-1)\
90
+					)
91
+
92
+
93
+/* mark/test used/unused frags */
94
+#define FRAG_MARK_USED(f)
95
+#define FRAG_CLEAR_USED(f)
96
+#define FRAG_WAS_USED(f)   (1)
97
+
98
+/* other frag related defines:
99
+ * MEM_COALESCE_FRAGS 
100
+ * MEM_FRAG_AVOIDANCE
101
+ */
102
+
103
+#define MEM_FRAG_AVOIDANCE
104
+
84 105
 
85 106
 /* computes hash number for big buckets*/
86 107
 inline static int big_hash_idx(int s)
... ...
@@ -90,7 +111,8 @@ inline static int big_hash_idx(int s)
90 111
 	 * index= i such that 2^i > s >= 2^(i-1)
91 112
 	 *
92 113
 	 * => index = number of the first non null bit in s*/
93
-	for (idx=31; !(s&0x80000000) ; s<<=1, idx--);
114
+	idx=sizeof(long)*8-1;
115
+	for (; !(s&(1UL<<(sizeof(long)*8-1))) ; s<<=1, idx--);
94 116
 	return idx;
95 117
 }
96 118
 
... ...
@@ -152,6 +174,7 @@ static inline void qm_insert_free(struct qm_block* qm, struct qm_frag* frag)
152 174
 	FRAG_END(frag)->prev_free=prev;
153 175
 	frag->u.nxt_free=f;
154 176
 	FRAG_END(f)->prev_free=frag;
177
+	qm->free_hash[hash].no++;
155 178
 }
156 179
 
157 180
 
... ...
@@ -243,10 +266,12 @@ static inline void qm_detach_free(struct qm_block* qm, struct qm_frag* frag)
243 266
 #ifdef DBG_QM_MALLOC
244 267
 static inline struct qm_frag* qm_find_free(struct qm_block* qm, 
245 268
 											unsigned int size,
269
+											int *h,
246 270
 											unsigned int *count)
247 271
 #else
248 272
 static inline struct qm_frag* qm_find_free(struct qm_block* qm, 
249
-											unsigned int size)
273
+											unsigned int size,
274
+											int* h)
250 275
 #endif
251 276
 {
252 277
 	int hash;
... ...
@@ -258,7 +283,7 @@ static inline struct qm_frag* qm_find_free(struct qm_block* qm,
258 283
 #ifdef DBG_QM_MALLOC
259 284
 			*count+=1; /* *count++ generates a warning with gcc 2.9* -Wall */
260 285
 #endif
261
-			if (f->size>=size) return f;
286
+			if (f->size>=size){ *h=hash; return f; }
262 287
 		}
263 288
 	/*try in a bigger bucket*/
264 289
 	}
... ...
@@ -282,7 +307,12 @@ int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned int new_size)
282 307
 	struct qm_frag_end* end;
283 308
 	
284 309
 	rest=f->size-new_size;
310
+#ifdef MEM_FRAG_AVOIDANCE
311
+	if ((rest> (FRAG_OVERHEAD+QM_MALLOC_OPTIMIZE))||
312
+		(rest>=(FRAG_OVERHEAD+new_size))){/* the residue fragm. is big enough*/
313
+#else
285 314
 	if (rest>(FRAG_OVERHEAD+MIN_FRAG_SIZE)){
315
+#endif
286 316
 		f->size=new_size;
287 317
 		/*split the fragment*/
288 318
 		end=FRAG_END(f);
... ...
@@ -290,6 +320,7 @@ int split_frag(struct qm_block* qm, struct qm_frag* f, unsigned int new_size)
290 320
 		n=(struct qm_frag*)((char*)end+sizeof(struct qm_frag_end));
291 321
 		n->size=rest-FRAG_OVERHEAD;
292 322
 		FRAG_END(n)->size=n->size;
323
+		FRAG_CLEAR_USED(n); /* never used */
293 324
 		qm->real_used+=FRAG_OVERHEAD;
294 325
 #ifdef DBG_QM_MALLOC
295 326
 		end->check1=END_CHECK_PATTERN1;
... ...
@@ -319,6 +350,7 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
319 350
 #endif
320 351
 {
321 352
 	struct qm_frag* f;
353
+	int hash;
322 354
 	
323 355
 #ifdef DBG_QM_MALLOC
324 356
 	unsigned int list_cntr;
... ...
@@ -333,9 +365,9 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
333 365
 
334 366
 	/*search for a suitable free frag*/
335 367
 #ifdef DBG_QM_MALLOC
336
-	if ((f=qm_find_free(qm, size, &list_cntr))!=0){
368
+	if ((f=qm_find_free(qm, size, &hash, &list_cntr))!=0){
337 369
 #else
338
-	if ((f=qm_find_free(qm, size))!=0){
370
+	if ((f=qm_find_free(qm, size, &hash))!=0){
339 371
 #endif
340 372
 		/* we found it!*/
341 373
 		/*detach it from the free list*/
... ...
@@ -345,6 +377,7 @@ void* qm_malloc(struct qm_block* qm, unsigned int size)
345 377
 		qm_detach_free(qm, f);
346 378
 		/*mark it as "busy"*/
347 379
 		f->u.is_free=0;
380
+		qm->free_hash[hash].no--;
348 381
 		/* we ignore split return */
349 382
 #ifdef DBG_QM_MALLOC
350 383
 		split_frag(qm, f, size, file, "fragm. from qm_malloc", line);
... ...
@@ -422,6 +455,7 @@ void qm_free(struct qm_block* qm, void* p)
422 455
 		qm_detach_free(qm, next);
423 456
 		size+=next->size+FRAG_OVERHEAD;
424 457
 		qm->real_used-=FRAG_OVERHEAD;
458
+		qm->free_hash[GET_HASH(next->size)].no--; /* FIXME slow */
425 459
 	}
426 460
 	
427 461
 	if (f > qm->first_frag){
... ...
@@ -436,6 +470,7 @@ void qm_free(struct qm_block* qm, void* p)
436 470
 			qm_detach_free(qm, prev);
437 471
 			size+=prev->size+FRAG_OVERHEAD;
438 472
 			qm->real_used-=FRAG_OVERHEAD;
473
+			qm->free_hash[GET_HASH(prev->size)].no--; /* FIXME slow */
439 474
 			f=prev;
440 475
 		}
441 476
 	}
... ...
@@ -530,6 +565,7 @@ void* qm_realloc(struct qm_block* qm, void* p, unsigned int size)
530 565
 					(n->u.is_free)&&((n->size+FRAG_OVERHEAD)>=diff)){
531 566
 				/* join  */
532 567
 				qm_detach_free(qm, n);
568
+				qm->free_hash[GET_HASH(n->size)].no--; /*FIXME: slow*/
533 569
 				f->size+=n->size+FRAG_OVERHEAD;
534 570
 				qm->real_used-=FRAG_OVERHEAD;
535 571
 				FRAG_END(f)->size=f->size;
... ...
@@ -582,6 +618,7 @@ void qm_status(struct qm_block* qm)
582 618
 	struct qm_frag* f;
583 619
 	int i,j;
584 620
 	int h;
621
+	int unused;
585 622
 
586 623
 	LOG(memlog, "qm_status (%p):\n", qm);
587 624
 	if (!qm) return;
... ...
@@ -591,13 +628,14 @@ void qm_status(struct qm_block* qm)
591 628
 			qm->used, qm->real_used, qm->size-qm->real_used);
592 629
 	LOG(memlog, " max used (+overhead)= %ld\n", qm->max_real_used);
593 630
 	
594
-	LOG(memlog, "dumping all allocked. fragments:\n");
631
+	LOG(memlog, "dumping all alloc'ed. fragments:\n");
595 632
 	for (f=qm->first_frag, i=0;(char*)f<(char*)qm->last_frag_end;f=FRAG_NEXT(f)
596 633
 			,i++){
597 634
 		if (! f->u.is_free){
598
-			LOG(memlog, "    %3d. %c  address=%p frag=%p size=%ld\n", i, 
635
+			LOG(memlog, "    %3d. %c  address=%p frag=%p size=%ld used=%d\n",
636
+				i, 
599 637
 				(f->u.is_free)?'a':'N',
600
-				(char*)f+sizeof(struct qm_frag), f, f->size);
638
+				(char*)f+sizeof(struct qm_frag), f, f->size, FRAG_WAS_USED(f));
601 639
 #ifdef DBG_QM_MALLOC
602 640
 			LOG(memlog, "            %s from %s: %s(%ld)\n",
603 641
 				(f->u.is_free)?"freed":"alloc'd", f->file, f->func, f->line);
... ...
@@ -608,10 +646,31 @@ void qm_status(struct qm_block* qm)
608 646
 	}
609 647
 	LOG(memlog, "dumping free list stats :\n");
610 648
 	for(h=0,i=0;h<QM_HASH_SIZE;h++){
611
-		
649
+		unused=0;
612 650
 		for (f=qm->free_hash[h].head.u.nxt_free,j=0; 
613
-				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++);
614
-			if (j) LOG(memlog, "hash= %3d. fragments no.: %5d\n", h, j);
651
+				f!=&(qm->free_hash[h].head); f=f->u.nxt_free, i++, j++){
652
+				if (!FRAG_WAS_USED(f)){
653
+					unused++;
654
+#ifdef DBG_QM_MALLOC
655
+					LOG(memlog, "unused fragm.: hash = %3d, fragment %p,"
656
+						" address %p size %lu, created from %s: %s(%lu)\n",
657
+					    h, f, (char*)f+sizeof(struct qm_frag), f->size,
658
+						f->file, f->func, f->line);
659
+#endif
660
+				}
661
+		}
662
+
663
+		if (j) LOG(memlog, "hash= %3d. fragments no.: %5d, unused: %5d\n"
664
+					"\t\t bucket size: %9ld - %9ld (first %9ld)\n",
665
+					h, j, unused, (long)UN_HASH(h),
666
+					(long)((h<=QM_MALLOC_OPTIMIZE/ROUNDTO)?1:2)*UN_HASH(h),
667
+					qm->free_hash[h].head.u.nxt_free->size
668
+				);
669
+		if (j!=qm->free_hash[h].no){
670
+			LOG(L_CRIT, "BUG: qm_status: different free frag. count: %d!=%lu"
671
+				" for hash %3d\n", j, qm->free_hash[h].no, h);
672
+		}
673
+
615 674
 	}
616 675
 	LOG(memlog, "-----------------------------\n");
617 676
 }
... ...
@@ -30,6 +30,8 @@
30 30
  * --------
31 31
  *  2003-05-21  on sparc64 roundto 8 even in debugging mode (so malloc'ed
32 32
  *               long longs will be 64 bit aligned) (andrei)
33
+ *  2004-07-19  support for 64 bit (2^64 mem. block) and more info
34
+ *               for the future de-fragmentation support (andrei)
33 35
  */
34 36
 
35 37
 
... ...
@@ -62,11 +64,11 @@
62 64
 #define QM_MALLOC_OPTIMIZE_FACTOR 11 /*used below */
63 65
 #define QM_MALLOC_OPTIMIZE  ((unsigned long)(1<<QM_MALLOC_OPTIMIZE_FACTOR))
64 66
 								/* size to optimize for,
65
-									(most allocs < this size),
67
+									(most allocs <= this size),
66 68
 									must be 2^k */
67 69
 
68 70
 #define QM_HASH_SIZE ((unsigned long)(QM_MALLOC_OPTIMIZE/ROUNDTO + \
69
-		(32-QM_MALLOC_OPTIMIZE_FACTOR)+1))
71
+		(sizeof(long)*8-QM_MALLOC_OPTIMIZE_FACTOR)+1))
70 72
 
71 73
 /* hash structure:
72 74
  * 0 .... QM_MALLOC_OPTIMIE/ROUNDTO  - small buckets, size increases with
... ...
@@ -100,9 +102,10 @@ struct qm_frag_end{
100 102
 
101 103
 
102 104
 
103
-struct qm_frag_full{
105
+struct qm_frag_lnk{
104 106
 	struct qm_frag head;
105 107
 	struct qm_frag_end tail;
108
+	unsigned long no;
106 109
 };
107 110
 
108 111
 
... ...
@@ -115,7 +118,7 @@ struct qm_block{
115 118
 	struct qm_frag* first_frag;
116 119
 	struct qm_frag_end* last_frag_end;
117 120
 	
118
-	struct qm_frag_full free_hash[QM_HASH_SIZE];
121
+	struct qm_frag_lnk free_hash[QM_HASH_SIZE];
119 122
 	/*struct qm_frag_end free_lst_end;*/
120 123
 };
121 124