ack/util/led/memory.c


/*
* (c) copyright 1987 by the Vrije Universiteit, Amsterdam, The Netherlands.
* See the copyright notice in the ACK home directory, in the file "Copyright".
*/
#ifndef lint
static char rcsid[] = "$Header$";
#endif
/*
* Memory manager. Memory is divided into NMEMS pieces. There is a struct
* for each piece telling where it is, how many bytes are used, and how many
* are left. If a request for core doesn't fit in the bytes left, an sbrk()
* is done and the pieces after the one that requested the growth are moved up.
*
* Unfortunately, we cannot use sbrk to request more memory, because its
* result cannot be trusted. More specifically, it does not work properly
* on 2.9 BSD, and probably does not work properly on 2.8 BSD and V7 either.
* The problem is that "sbrk" adds the increment to the current "break"
* WITHOUT testing the carry bit. So, if your break is at 40000, and
* you "sbrk(30000)", it will succeed, but your break will be at 4464
* (70000 - 65536).
*/
#include <stdio.h>
#include <out.h>
#include "const.h"
#include "assert.h"
#include "debug.h"
#include "memory.h"
static copy_down();
static copy_up();
static free_saved_moduls();
struct memory mems[NMEMS];
bool incore = TRUE; /* TRUE while everything can be kept in core. */
ind_t core_position = (ind_t)0; /* Index of current module. */
#define GRANULE 64 /* power of 2 */
static char *BASE;
static ind_t refused;
sbreak(incr)
ind_t incr;
{
extern char *brk();
unsigned int inc;
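/*
 * Round the request up to a multiple of GRANULE; since GRANULE is a
 * power of 2 this is a simple mask operation (e.g. 100 becomes 128).
 */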
incr = (incr + (GRANULE - 1)) & ~(GRANULE - 1);
inc = incr;
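/*
 * Refuse the request if a smaller request was already refused, if the
 * increment does not fit in an unsigned int or would wrap the break
 * around the top of the address space (these two checks only matter
 * when pointers are narrower than longs), or if brk() itself fails.
 */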
if ((refused && refused < incr) ||
(sizeof(char *) < sizeof(long) &&
(inc != incr || BASE + inc < BASE)) ||
(int) brk(BASE + incr) == -1) {
if (!refused || refused > incr)
refused = incr;
return -1;
}
BASE += incr;
return 0;
}
/*
* Initialize some pieces of core. We hope that this will be our last
* real allocation, meaning we've made the right choices.
*/
init_core()
{
register char *base;
register ind_t total_size;
register struct memory *mem;
extern char *sbrk();
#include "mach.c"
#define ALIGN 8 /* minimum alignment for pieces */
#define AT_LEAST (ind_t)2*ALIGN /* See comment about string areas. */
total_size = (ind_t)0; /* Will accumulate the sizes. */
BASE = base = sbrk(0); /* First free. */
if ((int)base % ALIGN) {
base = sbrk(ALIGN - (int)base % ALIGN);
BASE = base = sbrk(0);
}
/*
* String areas are special-cased. The first byte is unused as a way to
* distinguish a name without string from a name which has the first
* string in the string area.
*/
for (mem = mems; mem < &mems[NMEMS]; mem++) {
mem->mem_base = base;
mem->mem_full = (ind_t)0;
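/*
 * String pieces: make sure there is some room and mark the first
 * byte as used, so that offset 0 can mean "no string".
 */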
if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
if (mem->mem_left == 0) {
mem->mem_left = ALIGN;
total_size += ALIGN;
base += ALIGN;
}
base += mem->mem_left;
total_size += mem->mem_left;
mem->mem_left--;
mem->mem_full++;
}
else {
base += mem->mem_left; /* Each piece will start after prev. */
total_size += mem->mem_left;
}
}
if (sbreak(total_size) == -1) {
incore = FALSE; /* In core strategy failed. */
if (sbreak(AT_LEAST) == -1)
fatal("no core at all");
base = BASE;
for (mem = mems; mem < &mems[NMEMS]; mem++) {
mem->mem_base = base;
if (mem == &mems[ALLOLCHR] || mem == &mems[ALLOGCHR]) {
base += ALIGN;
mem->mem_left = ALIGN - 1;
mem->mem_full = 1;
}
else {
mem->mem_full = (ind_t)0;
mem->mem_left = 0;
}
}
}
}
/*
* Allocate an extra block of `incr' bytes and move all pieces with index
* higher than `piece' up by the size of the block.
* If the full "incr" cannot be obtained, move up as much as possible.
*/
static ind_t
move_up(piece, incr)
register int piece;
register ind_t incr;
{
register struct memory *mem;
#ifndef NOSTATISTICS
extern int statistics;
#endif
debug("move_up(%d, %d)\n", piece, (int)incr, 0, 0);
while (incr > 0 && sbreak(incr) == -1)
incr -= INCRSIZE;
if (incr <= 0) {
incr = 0;
return (ind_t) 0;
}
#ifndef NOSTATISTICS
if (statistics) fprintf(stderr,"moving up %lX\n", (long) incr);
#endif
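/*
 * Move the pieces above `piece' up, highest one first, so that every
 * copy lands in space that has already been vacated.
 */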
for (mem = &mems[NMEMS - 1]; mem > &mems[piece]; mem--)
copy_up(mem, incr);
mems[piece].mem_left += incr;
return incr;
}
extern int passnumber;
/*
* This routine is called if `piece' needs `incr' bytes and the system won't
* give them. We first steal the free bytes of all lower pieces and move them
* and `piece' down. If that doesn't give us enough bytes, we steal the free
* bytes of all higher pieces and move them up. We return whether we have
* enough bytes, the first or the second time.
*/
static bool
compact(piece, incr, flag)
register int piece;
register ind_t incr;
int flag;
#define NORMAL 0
#define FREEZE 1
#define FORCED 2
{
register ind_t gain, size;
register struct memory *mem;
int min = piece, max = piece;
#define SHIFT_COUNT 2 /* let a piece contribute only if its free
memory is more than 1/2**SHIFT_COUNT (i.e. 25%)
of its occupied memory
*/
debug("compact(%d, %d, %d)\n", piece, (int)incr, flag, 0);
for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
}
mem = &mems[piece];
if (flag == NORMAL) {
/* try and gain a bit more than needed */
gain = (mem->mem_full + incr) >> SHIFT_COUNT;
if (incr < gain) incr = gain;
}
/*
* First, check that moving will result in enough space
*/
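/*
 * Scan the pieces below `piece' first; if their surplus, together
 * with the free bytes of `piece' itself, is not enough, scan the
 * pieces above it as well. `min' and `max' delimit the range of
 * pieces that will be shifted.
 */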
if (flag != FREEZE) {
gain = mem->mem_left;
for (mem = &mems[piece-1]; mem >= &mems[0]; mem--) {
/*
* Don't give it all away!
* If this does not give us enough, bad luck
*/
if (flag == FORCED)
size = 0;
else {
size = mem->mem_full >> SHIFT_COUNT;
if (size == 0) size = mem->mem_left >> 1;
}
if (mem->mem_left >= size)
gain += (mem->mem_left - size) & ~(ALIGN - 1);
if (gain >= incr) {
min = mem - &mems[0];
break;
}
}
if (min == piece)
for (mem = &mems[piece+1]; mem <= &mems[NMEMS - 1]; mem++) {
/*
* Don't give it all away!
* If this does not give us enough, bad luck
*/
if (flag == FORCED)
size = 0;
else {
size = mem->mem_full >> SHIFT_COUNT;
if (size == 0) size = mem->mem_left >> 1;
}
if (mem->mem_left >= size)
gain += (mem->mem_left - size) & ~(ALIGN - 1);
if (gain >= incr) {
max = mem - &mems[0];
break;
}
}
if (min == piece) {
min = 0;
if (max == piece) max = 0;
}
if (gain < incr) return 0;
}
else {
min = 0;
max = NMEMS - 1;
}
gain = 0;
for (mem = &mems[min]; mem != &mems[piece]; mem++) {
/* Here memory is inserted before a piece. */
assert(passnumber == FIRST || gain == (ind_t)0);
if (gain) copy_down(mem, gain);
if (flag == FREEZE || gain < incr) {
if (flag != NORMAL) size = 0;
else {
size = mem->mem_full >> SHIFT_COUNT;
if (size == 0) size = mem->mem_left >> 1;
}
if (mem->mem_left >= size) {
size = (mem->mem_left - size) & ~(ALIGN - 1);
gain += size;
mem->mem_left -= size;
}
}
}
/*
* Now mems[piece] itself: move it down if there is gain from below,
* and absorb its own free bytes into the gain.
*/
if (gain) copy_down(mem, gain);
gain += mem->mem_left;
mem->mem_left = 0;
if (gain < incr) {
register ind_t up = (ind_t)0;
for (mem = &mems[max]; mem > &mems[piece]; mem--) {
/* Here memory is appended after a piece. */
if (flag == FREEZE || gain + up < incr) {
if (flag != NORMAL) size = 0;
else {
size = mem->mem_full >> SHIFT_COUNT;
if (size == 0) size = mem->mem_left >> 1;
}
if (mem->mem_left >= size) {
size = (mem->mem_left - size) & ~(ALIGN - 1);
up += size;
mem->mem_left -= size;
}
}
if (up) copy_up(mem, up);
}
gain += up;
}
mems[piece].mem_left += gain;
assert(flag == FREEZE || gain >= incr);
for (mem = &mems[0]; mem < &mems[NMEMS - 1]; mem++) {
assert(mem->mem_base + mem->mem_full + mem->mem_left == (mem+1)->mem_base);
}
return gain >= incr;
}
/*
* The bytes of `mem' must be moved `dist' down in the address space.
* We copy the bytes from low to high, because the tail of the new area may
* overlap with the old area, and we do not want to overwrite old bytes
* before they have been copied.
*/
static
copy_down(mem, dist)
register struct memory *mem;
ind_t dist;
{
register char *old;
register char *new;
register ind_t size;
size = mem->mem_full;
old = mem->mem_base;
new = old - dist;
mem->mem_base = new;
while (size--)
*new++ = *old++;
}
/*
* The bytes of `mem' must be moved `dist' up in the address space.
* We copy the bytes from high to low, because the head of the new area may
* overlap with the tail of the old area, and we do not want to overwrite
* old bytes before they have been copied.
*/
static
copy_up(mem, dist)
register struct memory *mem;
ind_t dist;
{
register char *old;
register char *new;
register ind_t size;
size = mem->mem_full;
old = mem->mem_base + size;
new = old + dist;
while (size--)
*--new = *--old;
mem->mem_base = new;
}
static int alloctype = NORMAL;
/*
* Add `size' bytes to the bytes already allocated for `piece'. If it has no
* free bytes left, request them from the system or, if that fails, take them
* from the free bytes of other pieces. The offset of the new area is returned.
* The offset remains valid no matter how often the area is moved by later
* allocations.
*/
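/*
 * Typical use, as in core_alloc() below (a sketch; address() is assumed
 * to be the memory.h macro that maps a piece/offset pair to a pointer):
 *
 *	ind_t off;
 *	char *p;
 *
 *	if ((off = alloc(ALLOMODL, (long)n)) == BADOFF)
 *		fatal("no space for module");
 *	p = address(ALLOMODL, off);
 *
 * The pointer must be recomputed from the offset after any later
 * alloc(), because the piece may have been moved in the meantime.
 */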
ind_t
alloc(piece, size)
int piece;
register long size;
{
register ind_t incr = 0;
ind_t left = mems[piece].mem_left;
register ind_t full = mems[piece].mem_full;
assert(passnumber == FIRST || (!incore && piece == ALLOMODL));
if (size == (long)0)
return full;
if (size != (ind_t)size)
return BADOFF;
switch(piece) {
case ALLOMODL:
case ALLORANL:
size = int_align(size);
}
if (size - left > 0)
incr = ((size - left + (INCRSIZE - 1)) / INCRSIZE) * INCRSIZE;
if (incr == 0 ||
(incr < left + full && (incr -= move_up(piece, left + full)) <= 0) ||
move_up(piece, incr) == incr ||
compact(piece, size, alloctype)) {
mems[piece].mem_full += size;
mems[piece].mem_left -= size;
return full;
} else {
incore = FALSE;
return BADOFF;
}
}
/*
* Same as alloc() but for a piece which really needs it. If the first
* attempt fails, release the space occupied by other pieces and try again.
*/
ind_t
hard_alloc(piece, size)
register int piece;
register long size;
{
register ind_t ret;
register int i;
if (size != (ind_t)size)
return BADOFF;
if ((ret = alloc(piece, size)) != BADOFF) {
return ret;
}
/*
* Deallocate what we don't need.
*/
for (i = 0; i < NMEMS; i++) {
switch (i) {
case ALLOGLOB:
case ALLOGCHR:
case ALLOSYMB:
case ALLOARCH:
case ALLOMODL:
case ALLORANL:
break; /* Do not try to deallocate this. */
default:
dealloc(i);
break;
}
}
free_saved_moduls();
if ((ret = alloc(piece, size)) != BADOFF) {
return ret;
}
alloctype = FORCED;
ret = alloc(piece, size);
alloctype = NORMAL;
return ret;
}
/*
* We don't need the previous modules, so we put the current module
* at the start of the piece allocated for module contents, thereby
* overwriting the saved modules, and release the space they occupied.
*/
static
free_saved_moduls()
{
register ind_t size;
register char *old, *new;
register struct memory *mem = &mems[ALLOMODL];
size = mem->mem_full - core_position;
new = mem->mem_base;
old = new + core_position;
while (size--)
*new++ = *old++;
mem->mem_full -= core_position;
mem->mem_left += core_position;
core_position = (ind_t)0;
}
/*
* The piece of memory with index `piece' is no longer needed.
* We take care that it can be used by compact() later, if needed.
*/
dealloc(piece)
register int piece;
{
/*
* Some pieces need their memory throughout the program.
*/
assert(piece != ALLOGLOB);
assert(piece != ALLOGCHR);
assert(piece != ALLOSYMB);
assert(piece != ALLOARCH);
mems[piece].mem_left += mems[piece].mem_full;
mems[piece].mem_full = (ind_t)0;
}
char *
core_alloc(piece, size)
register int piece;
register long size;
{
register ind_t off;
if ((off = alloc(piece, size)) == BADOFF)
return (char *)0;
return address(piece, off);
}
core_free(piece, p)
int piece;
char *p;
{
char *q = address(piece, mems[piece].mem_full);
assert(p < q);
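/*
 * Give back the bytes between `p' and the current top of the piece.
 * When an unsigned int can hold a pointer, the difference of the two
 * pointers is used directly; otherwise both are converted to ind_t
 * first.
 */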
switch(sizeof(unsigned) == sizeof(char *)) {
case 1:
mems[piece].mem_full -= (unsigned) (q - p);
mems[piece].mem_left += (unsigned) (q - p);
break;
default:
mems[piece].mem_full -= (ind_t) q - (ind_t) p;
mems[piece].mem_left += (ind_t) q - (ind_t) p;
break;
}
}
/*
* Reset index into piece of memory for modules and
* take care that the allocated pieces will not be moved.
*/
freeze_core()
{
register int i;
core_position = (ind_t)0;
if (incore)
return;
for (i = 0; i < NMEMS; i++) {
switch (i) {
case ALLOGLOB:
case ALLOGCHR:
case ALLOSYMB:
case ALLOARCH:
break; /* Do not try to deallocate this. */
default:
dealloc(i);
break;
}
}
compact(NMEMS - 1, (ind_t)0, FREEZE);
}
/* ------------------------------------------------------------------------- */
/*
* To transform the various pieces of the output in core to the file format,
* we must order the bytes in the ushorts and longs as ACK prescribes.
*/
write_bytes()
{
ushort nsect;
long offchar;
register struct memory *mem;
extern ushort NLocals, NGlobals;
extern long NLChars, NGChars;
extern int flagword;
extern struct outhead outhead;
extern struct outsect outsect[];
extern char *outputname;
int sectionno = 0;
nsect = outhead.oh_nsect;
offchar = OFF_CHAR(outhead);
/*
* We allocated two areas: one for local and one for global names.
* Also, the on_foff fields kept in core differ from the ones needed in the file.
* At the end of the global area we have put the section names.
*/
if (!(flagword & SFLAG)) {
namecpy((struct outname *)mems[ALLOLOCL].mem_base,
NLocals,
offchar
);
namecpy((struct outname *)mems[ALLOGLOB].mem_base,
NGlobals + nsect,
offchar + NLChars
);
}
if (! wr_open(outputname)) {
fatal("can't create %s", outputname);
}
/*
* These pieces must always be written.
*/
wr_ohead(&outhead);
wr_sect(outsect, nsect);
for (mem = &mems[ALLOEMIT]; mem < &mems[ALLORELO]; mem++)
wrt_emit(mem->mem_base, sectionno++, mem->mem_full);
/*
* The rest depends on the flags.
*/
if (flagword & (RFLAG|CFLAG))
wr_relo((struct outrelo *) mems[ALLORELO].mem_base,
outhead.oh_nrelo);
if (!(flagword & SFLAG)) {
wr_name((struct outname *) mems[ALLOLOCL].mem_base,
NLocals);
wr_name((struct outname *) mems[ALLOGLOB].mem_base,
NGlobals+nsect);
wr_string(mems[ALLOLCHR].mem_base + 1, (long)NLChars);
wr_string(mems[ALLOGCHR].mem_base + 1, (long)NGChars);
#ifdef SYMDBUG
wr_dbug(mems[ALLODBUG].mem_base, mems[ALLODBUG].mem_full);
#endif /* SYMDBUG */
}
wr_close();
}
namecpy(name, nname, offchar)
register struct outname *name;
register ushort nname;
register long offchar;
{
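/*
 * In core, on_foff is an index into the string piece; index 0 means
 * "no string" and the first real string starts at index 1. In the
 * output file the strings start at `offchar', so every nonzero
 * on_foff gets offchar - 1 added to it.
 */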
while (nname--) {
if (name->on_foff)
name->on_foff += offchar - 1;
name++;
}
}