/* realloc.c - C standard library routine.
   Copyright (c) 1989, 1993 Michael J. Haertel
   You may redistribute this library under the terms of the
   GNU Library General Public License (version 2 or any later
   version) as published by the Free Software Foundation.
   THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED
   WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR
   WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS
   SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. */

#include <limits.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include "malloc.h"

#define MIN(A, B) ((A) < (B) ? (A) : (B))

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region. This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region. */
void *
realloc (void *ptr, size_t size)
{
    void *result, *previous;
    int block, blocks, type;
    int oldlimit;

    if (!ptr)
        return malloc(size);
    if (!size) {
        free(ptr);
        return malloc(0);
    }

    block = BLOCK(ptr);

    switch (type = _heapinfo[block].busy.type) {
    case 0:
        /* Maybe reallocate a large block to a small fragment. */
        if (size <= BLOCKSIZE / 2) {
            if ((result = malloc(size)) != NULL) {
                memcpy(result, ptr, size);
#if 1
                free(ptr);
#else
                _free_internal(ptr);
#endif
            }
            return result;
        }

        /* The new size is a large allocation as well; see if
           we can hold it in place. */
        blocks = BLOCKIFY(size);
        if (blocks < _heapinfo[block].busy.info.size) {
            /* The new size is smaller; return excess memory
               to the free list. */
            _heapinfo[block + blocks].busy.type = 0;
            _heapinfo[block + blocks].busy.info.size
                = _heapinfo[block].busy.info.size - blocks;
            _heapinfo[block].busy.info.size = blocks;
#if 1
            free(ADDRESS(block + blocks));
#else
            _free_internal(ADDRESS(block + blocks));
#endif
            return ptr;
        } else if (blocks == _heapinfo[block].busy.info.size)
            /* No size change necessary. */
            return ptr;
        else {
            /* Won't fit, so allocate a new region that will. Free
               the old region first in case there is sufficient adjacent
               free space to grow without moving. */
            blocks = _heapinfo[block].busy.info.size;
            /* Prevent free from actually returning memory to the system. */
            oldlimit = _heaplimit;
            _heaplimit = 0;
#if 1
            free(ptr);
#else
            _free_internal(ptr);
#endif
            _heaplimit = oldlimit;
            result = malloc(size);
            if (!result) {
                /* Now we're really in trouble. We have to unfree
                   the thing we just freed. Unfortunately it might
                   have been coalesced with its neighbors. */
                if (_heapindex == block)
                    malloc(blocks * BLOCKSIZE);
                else {
                    previous = malloc((block - _heapindex) * BLOCKSIZE);
                    malloc(blocks * BLOCKSIZE);
#if 1
                    free(previous);
#else
                    _free_internal(previous);
#endif
                }
                return NULL;
            }
            if (ptr != result)
                memmove(result, ptr, blocks * BLOCKSIZE);
            return result;
        }
        break;

    default:
        /* Old size is a fragment; type is logarithm to base two of
           the fragment size. */
        if ((size > 1 << (type - 1)) && (size <= 1 << type))
            /* New size is the same kind of fragment. */
            return ptr;
        else {
            /* New size is different; allocate a new space, and copy
               the lesser of the new size and the old. */
            result = malloc(size);
            if (!result)
                return NULL;
            memcpy(result, ptr, MIN(size, 1 << type));
            free(ptr);
            return result;
        }
        break;
    }
}
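
The driver below is a minimal sketch, not part of the original realloc.c: it exercises the caller-visible behavior documented above (realloc(NULL, n) acts like malloc, realloc(ptr, 0) frees the region, contents survive up to the smaller of the old and new sizes, and the region may move). The REALLOC_DEMO guard and the chosen sizes are assumptions added here for illustration; it relies only on names already used in this file (BLOCKSIZE from malloc.h and the standard headers included above, plus <stdio.h> for output).

/* Hypothetical demo driver; compile with -DREALLOC_DEMO to build it,
   otherwise the library file is unaffected. */
#ifdef REALLOC_DEMO
#include <stdio.h>

int
main (void)
{
    /* realloc(NULL, n) behaves like malloc(n). */
    char *p = realloc(NULL, 16);
    if (!p)
        return 1;
    strcpy(p, "hello");

    /* Growing a small fragment into a large block goes through the
       default: case above: a new region is allocated and the old
       contents copied, so the pointer may change. */
    char *q = realloc(p, 4 * BLOCKSIZE);
    if (!q) {
        free(p);    /* on failure the old region is still allocated */
        return 1;
    }
    printf("after grow:   %s\n", q);

    /* Shrinking back below BLOCKSIZE / 2 takes the case 0 fragment
       path: the first `size` bytes are copied into a fresh fragment. */
    char *r = realloc(q, 8);
    if (!r) {
        free(q);
        return 1;
    }
    printf("after shrink: %s\n", r);

    /* realloc(ptr, 0) frees ptr; this implementation then returns
       whatever malloc(0) yields, which is safe to pass to free. */
    free(realloc(r, 0));
    return 0;
}
#endif /* REALLOC_DEMO */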