
malloc_test: realloc testing support

Support for stress testing realloc:
- new config variable realloc_p for enabling realloc() usage
  instead of malloc() for the mem tests and mem_rnd_alloc. Its value
  is the percentage of realloc() calls out of the total number of
  allocation calls (0-90%).
- new RPC: mt.mem_realloc size [unit] for manually triggering
a realloc.

Andrei Pelinescu-Onciul authored on 12/03/2010 18:38:42
Showing 1 changed file
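For quick reference, a minimal usage sketch of the new variable and RPC (not part of this commit; it assumes the cfg group is registered as "malloc_test", the standard config-framework script syntax, and the sercmd CLI with the cfg_rpc module loaded):

    # ser.cfg: make ~30% of the test allocations go through realloc()
    malloc_test.realloc_p = 30

    # change it at runtime (assumes cfg_rpc is loaded):
    sercmd cfg.set_now_int malloc_test realloc_p 50

    # allocate some chunks first, then manually realloc a random one to 64 KB:
    sercmd mt.mem_alloc 1024 k
    sercmd mt.mem_realloc 64 k

Note that mt.mem_realloc picks a random tracked chunk, so the reported difference can be negative when the chosen chunk shrinks.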
@@ -55,11 +55,13 @@ static cmd_export_t cmds[]={
 
 struct cfg_group_malloc_test {
 	int check_content;
+	int realloc_p; /* realloc probability */
 };
 
 
 static struct cfg_group_malloc_test default_mt_cfg = {
-	0 /* check_content, off by default */
+	0, /* check_content, off by default */
+	0  /* realloc probability, 0 by default */
 };
 
 static void * mt_cfg = &default_mt_cfg;
@@ -67,7 +69,12 @@ static void * mt_cfg = &default_mt_cfg;
 static cfg_def_t malloc_test_cfg_def[] = {
 	{"check_content", CFG_VAR_INT | CFG_ATOMIC, 0, 1, 0, 0,
 		"check if allocated memory was overwritten by filling it with "
-		"a special pattern and checking it on free"},
+		"a special pattern and checking it on free."},
+	{"realloc_p", CFG_VAR_INT | CFG_ATOMIC, 0, 90, 0, 0,
+		"realloc probability in percent. During tests and mem_rnd_alloc"
+		" realloc_p percent of the allocations will be made by realloc'ing"
+		" an existing chunk. The maximum value is limited to 90, to avoid"
+		" very long mem_rnd_alloc runs (a realloc might also free memory)." },
 	{0, 0, 0, 0, 0, 0}
 };
 
@@ -111,6 +118,7 @@ struct allocated_list {
 	struct mem_chunk* chunks;
 	gen_lock_t lock;
 	volatile long size;
+	volatile int no;
 };
 
 struct allocated_list* alloc_lst;
@@ -126,6 +134,7 @@ struct rnd_time_test {
 	ticks_t stop_time;
 	ticks_t start_time;
 	unsigned long calls;
+	unsigned long reallocs;
 	unsigned int errs;
 	unsigned int overfl;
 	struct rnd_time_test* next;
@@ -160,6 +169,7 @@ static int mod_init(void)
 		goto error;
 	alloc_lst->chunks = 0;
 	atomic_set_long(&alloc_lst->size, 0);
+	atomic_set_int(&alloc_lst->no, 0);
 	if (lock_init(&alloc_lst->lock) == 0)
 		goto error;
 	rndt_lst = shm_malloc(sizeof(*rndt_lst));
@@ -214,10 +224,10 @@ static int mem_track(void* addr, unsigned long size)
 		mc->flags |=  MC_F_CHECK_CONTENTS;
 		d = addr;
 		for (r = 0; r < size/sizeof(*d); r++){
-			d[r]=~(unsigned long)d;
+			d[r]=~(unsigned long)&d[r];
 		}
 		for (i=0; i< size % sizeof(*d); i++){
-			((char*)&d[r])[i]=~((unsigned long)d >> i*8);
+			((char*)&d[r])[i]=~((unsigned long)&d[r] >> i*8);
 		}
 	}
 	lock_get(&alloc_lst->lock);
@@ -225,6 +235,7 @@ static int mem_track(void* addr, unsigned long size)
 		alloc_lst->chunks = mc;
 	lock_release(&alloc_lst->lock);
 	atomic_add_long(&alloc_lst->size, size);
+	atomic_inc_int(&alloc_lst->no);
 	return 0;
 error:
 	return -1;
@@ -254,6 +265,36 @@ static int mem_leak(unsigned long size)
 
 
 
+/* realloc a chunk, unsafe (requires external locking) version.
+ * @return 0 on success, -1 on error
+ */
+static int _mem_chunk_realloc_unsafe(struct mem_chunk *c, unsigned long size)
+{
+	unsigned long* d;
+	int r, i;
+
+	d = shm_realloc(c->addr, size);
+	if (d) {
+		if (cfg_get(malloc_test, mt_cfg, check_content) &&
+				c->flags & MC_F_CHECK_CONTENTS) {
+			/* re-fill the test patterns (the address might have changed
+			   and they depend on it) */
+			for (r = 0; r < size/sizeof(*d); r++){
+				d[r]=~(unsigned long)&d[r];
+			}
+			for (i=0; i< size % sizeof(*d); i++){
+				((char*)&d[r])[i]=~((unsigned long)&d[r] >> i*8);
+			}
+		}
+		c->addr = d;
+		c->size = size;
+		return 0;
+	}
+	return -1;
+}
+
+
+
 static void mem_chunk_free(struct mem_chunk* c)
 {
 	unsigned long* d;
@@ -265,15 +306,15 @@ static void mem_chunk_free(struct mem_chunk* c)
 		d = c->addr;
 		err = 0;
 		for (r = 0; r < c->size/sizeof(*d); r++){
-			if (d[r]!=~(unsigned long)d)
+			if (d[r]!=~(unsigned long)&d[r])
 				err++;
-			d[r] = r; /* fill it with something else */
+			d[r] = (unsigned long)&d[r]; /* fill it with something else */
 		}
 		for (i=0; i< c->size % sizeof(*d); i++){
 			if (((unsigned char*)&d[r])[i] !=
-					(unsigned char)~((unsigned long)d >> i*8))
+					(unsigned char)~((unsigned long)&d[r] >> i*8))
 				err++;
-			((char*)&d[r])[i] = (unsigned char)((unsigned long)d >> i*8);
+			((char*)&d[r])[i] = (unsigned char)((unsigned long)&d[r] >> i*8);
 		}
 		if (err)
 			ERR("%d errors while checking %ld bytes at %p\n", err, c->size, d);
@@ -297,8 +338,10 @@ static unsigned long mem_unleak(unsigned long size)
 	struct mem_chunk* t;
 	struct mem_chunk** min_chunk;
 	unsigned long freed;
+	unsigned int no;
 
 	freed = 0;
+	no = 0;
 	min_chunk = 0;
 	lock_get(&alloc_lst->lock);
 	if (size>=atomic_get_long(&alloc_lst->size)){
@@ -307,6 +350,7 @@ static unsigned long mem_unleak(unsigned long size)
 			t = *mc;
 			mem_chunk_free(t);
 			freed += t->size;
+			no++;
 			*mc = t->next;
 			shm_free(t);
 		}
@@ -318,6 +362,7 @@ static unsigned long mem_unleak(unsigned long size)
 				t = *mc;
 				mem_chunk_free(t);
 				freed += t->size;
+				no++;
 				*mc = t->next;
 				shm_free(t);
 				continue;
@@ -332,16 +377,52 @@ static unsigned long mem_unleak(unsigned long size)
 			t = *mc;
 			mem_chunk_free(t);
 			freed += t->size;
+			no++;
 			*mc = (*mc)->next;
 			shm_free(t);
 		}
 	}
 	lock_release(&alloc_lst->lock);
 	atomic_add_long(&alloc_lst->size, -freed);
+	atomic_add_int(&alloc_lst->no, -no);
 	return freed;
 }
 
 
+
+/** realloc a randomly chosen chunk to size bytes.
+ * Randomly chooses a previously allocated chunk and realloc's it.
+ * @param size - new size in bytes.
+ * @param diff - filled with the difference, >= 0 means more bytes were
+ *               allocated, < 0 means bytes were freed.
+ * @return  >= 0 on success, -1 on error / not found
+ * (an empty list is a valid error reason)
+ */
+static int mem_rnd_realloc(unsigned long size, long* diff)
+{
+	struct mem_chunk* t;
+	int ret;
+	int target, i;
+
+	*diff = 0;
+	ret = -1;
+	lock_get(&alloc_lst->lock);
+		target = fastrand_max(atomic_get_int(&alloc_lst->no));
+		for (t = alloc_lst->chunks, i=0; t; t=t->next, i++ ){
+			if (target == i) {
+				*diff = (long)size - (long)t->size;
+				if ((ret=_mem_chunk_realloc_unsafe(t, size)) < 0)
+					*diff = 0;
+				break;
+			}
+		}
+	lock_release(&alloc_lst->lock);
+	atomic_add_long(&alloc_lst->size, *diff);
+	return ret;
+}
+
+
+
 #define MIN_ulong(a, b) \
 	(unsigned long)((unsigned long)(a)<(unsigned long)(b)?(a):(b))
 
@@ -355,13 +436,21 @@ static int mem_rnd_leak(unsigned long min, unsigned long max,
 {
 	unsigned long size;
 	unsigned long crt_size, crt_min;
-	int err;
+	long diff;
+	int err, p;
 
 	size = total_size;
 	err = 0;
 	while(size){
 		crt_min = MIN_ulong(min, size);
 		crt_size = fastrand_max(MIN_ulong(max, size) - crt_min) + crt_min;
+		p = cfg_get(malloc_test, mt_cfg, realloc_p);
+		if (p && ((fastrand_max(99) +1) <= p)){
+			if (mem_rnd_realloc(crt_size, &diff) == 0){
+				size -= diff;
+				continue;
+			} /* else fall back to a normal alloc. */
+		}
 		size -= crt_size;
 		err += mem_leak(crt_size) < 0;
 	}
@@ -377,6 +466,8 @@ static ticks_t tst_timer(ticks_t ticks, struct timer_ln* tl, void* data)
 	ticks_t next_int;
 	ticks_t max_int;
 	unsigned long crt_size, crt_min, remaining;
+	long diff;
+	int p;
 
 	tst = data;
 
@@ -392,10 +483,19 @@ static ticks_t tst_timer(ticks_t ticks, struct timer_ln* tl, void* data)
 	crt_min = MIN_ulong(tst->min, remaining);
 	crt_size = fastrand_max(MIN_ulong(tst->max, remaining) - crt_min) +
 				crt_min;
+	p = cfg_get(malloc_test, mt_cfg, realloc_p);
+	if (p && ((fastrand_max(99) +1) <= p)) {
+		if (mem_rnd_realloc(crt_size, &diff) == 0){
+			tst->crt -= diff;
+			tst->reallocs++;
+			goto skip_alloc;
+		}
+	}
 	if (mem_leak(crt_size) >= 0)
 		tst->crt += crt_size;
 	else
 		tst->errs ++;
+skip_alloc:
 	tst->calls++;
 
 	if (TICKS_GT(tst->stop_time, ticks)) {
@@ -635,6 +735,39 @@ static void rpc_mt_alloc(rpc_t* rpc, void* c)
 }
 
 
+static const char* rpc_mt_realloc_doc[2] = {
+	"Reallocates a randomly selected, pre-allocated memory chunk to the"
+	" specified number of bytes. If no pre-allocated memory"
+	" chunk exists, it will fail."
+	" Make sure mt.mem_used is non-zero or call mt.mem_alloc prior to calling"
+	" this function."
+	" Returns the difference in bytes (<0 if bytes were freed, >0 if more"
+	" bytes were allocated)."
+	" Use b|k|m|g to specify the desired size unit",
+	0
+};
+
+static void rpc_mt_realloc(rpc_t* rpc, void* c)
+{
+	int size;
+	int rs;
+	long diff;
+
+	if (rpc->scan(c, "d", &size) < 1) {
+		return;
+	}
+	rs=rpc_get_size_mod(rpc, c);
+	if (rs<0)
+		/* fault already generated on rpc_get_size_mod() error */
+		return;
+	if (mem_rnd_realloc((unsigned long)size << rs, &diff) < 0) {
+		rpc->fault(c, 400, "memory allocation failed");
+	}
+	rpc->add(c, "d", diff >> rs);
+	return;
+}
+
+
 static const char* rpc_mt_free_doc[2] = {
 	"Frees the specified number of bytes, previously allocated by one of the"
 	" other malloc_test functions (e.g. mt.mem_alloc or the script "
@@ -666,8 +799,9 @@ static void rpc_mt_free(rpc_t* rpc, void* c)
 
 
 static const char* rpc_mt_used_doc[2] = {
-	"Returns how many bytes are currently allocated via the mem_alloc module"
-	" functions. Use b|k|m|g to specify the desired size unit.",
+	"Returns how many memory chunks and how many bytes are currently"
+	" allocated via the mem_alloc module functions."
+	" Use b|k|m|g to specify the desired size unit.",
 	0
 };
 
@@ -681,6 +815,7 @@ static void rpc_mt_used(rpc_t* rpc, void* c)
 	if (rs<0)
 		/* fault already generated on rpc_get_size_mod() error */
 		return;
+	rpc->add(c, "d", atomic_get_int(&alloc_lst->no));
 	rpc->add(c, "d", (int)(atomic_get_long(&alloc_lst->size) >> rs));
 	return;
 }
@@ -698,6 +833,7 @@ static void rpc_mt_rnd_alloc(rpc_t* rpc, void* c)
 {
 	int min, max, total_size;
 	int rs;
+	int err;
 
 	if (rpc->scan(c, "ddd", &min, &max, &total_size) < 3) {
 		return;
@@ -710,10 +846,10 @@ static void rpc_mt_rnd_alloc(rpc_t* rpc, void* c)
 		rpc->fault(c, 400, "invalid parameter values");
 		return;
 	}
-	if (mem_rnd_leak((unsigned long)min << rs,
-					 (unsigned long)max << rs,
-					 (unsigned long)total_size <<rs ) < 0) {
-		rpc->fault(c, 400, "memory allocation failed");
+	if ((err=mem_rnd_leak((unsigned long)min << rs,
+						 (unsigned long)max << rs,
+						 (unsigned long)total_size <<rs )) < 0) {
+		rpc->fault(c, 400, "memory allocation failed (%d errors)", -err);
 	}
 	return;
 }
@@ -849,7 +985,7 @@ static void rpc_mt_test_list(rpc_t* rpc, void* c)
 		for (tst = rndt_lst->tests; tst; tst=tst->next)
 			if (tst->id == id || id == -1) {
 				rpc->add(c, "{", &h);
-				rpc->struct_add(h, "dddddddddd",
+				rpc->struct_add(h, "ddddddddddd",
 						"ID           ",  tst->id,
 						"run time (s) ", (int)TICKS_TO_S((
											TICKS_LE(tst->stop_time,
@@ -860,7 +996,8 @@ static void rpc_mt_test_list(rpc_t* rpc, void* c)
 												get_ticks_raw()) ? 0 :
 										(int)TICKS_TO_S(tst->stop_time -
 														get_ticks_raw()),
-						"allocations  ", (int)tst->calls,
+						"total calls  ", (int)tst->calls,
+						"reallocs     ", (int)tst->reallocs,
 						"errors       ", (int)tst->errs,
 						"overflows    ", (int)tst->overfl,
 						"total alloc  ", (int)((tst->crt +
@@ -879,6 +1016,7 @@ static void rpc_mt_test_list(rpc_t* rpc, void* c)
 static rpc_export_t mt_rpc[] = {
 	{"mt.mem_alloc", rpc_mt_alloc, rpc_mt_alloc_doc, 0},
 	{"mt.mem_free", rpc_mt_free, rpc_mt_free_doc, 0},
+	{"mt.mem_realloc", rpc_mt_realloc, rpc_mt_realloc_doc, 0},
 	{"mt.mem_used", rpc_mt_used, rpc_mt_used_doc, 0},
 	{"mt.mem_rnd_alloc", rpc_mt_rnd_alloc, rpc_mt_rnd_alloc_doc, 0},
 	{"mt.mem_test_start", rpc_mt_test_start, rpc_mt_test_start_doc, 0},