/* MMVirtualMemory.c, Copyright (c) by George Fankhauser, Swiss Federal Institute of Technology, Computer Engineering and Networks Laboratory. TOPSY -- A Teachable Operating System. Implementation of a tiny and simple micro kernel for teaching purposes. For further information, please visit http://www.tik.ee.ethz.ch/~topsy This software is provided under the terms of the GNU General Public Licence. A full copy of the GNU GPL is provided in the file COPYING found in the development root of Topsy. */ /* File: $Source: /usr/drwho/vault/cvs/topsy/Topsy/Memory/MMVirtualMemory.c,v $ Author(s): George Fankhauser Affiliation: ETH Zuerich, TIK Version: $Revision: 1.25 $ Creation Date: Last Date of Change: $Date: 2000/06/05 14:06:27 $ by: $Author: gfa $ $Log: MMVirtualMemory.c,v $ Revision 1.25 2000/06/05 14:06:27 gfa *** empty log message *** Revision 1.24 1999/12/13 21:48:30 ruf GNU General Public Licence Update Revision 1.23 1999/04/08 11:40:12 jeker added some new files, modified some others for unix port Revision 1.22 1998/05/02 19:29:18 gfa added minor comment... 
Revision 1.21 1997/04/23 09:19:22 gfa formatting * Revision 1.20 1997/04/20 17:22:35 gfa * *** empty log message *** * * Revision 1.19 1997/04/06 18:47:35 gfa * source cleanup, added vmCleanup implementation * * Revision 1.18 1997/03/31 20:35:34 gfa * adapted for global name changes (syscalls) * * Revision 1.17 1997/03/27 21:57:25 gfa * removed heapaddress from mmvminit * * Revision 1.16 1997/03/27 17:28:39 gfa * changed list usage to hinting lists * * Revision 1.15 1997/03/26 09:36:06 gfa * *** empty log message *** * * Revision 1.14 1997/03/24 19:13:56 gfa * changed newListStatic to simple newList (uses less memory and code) * * Revision 1.13 1997/03/24 11:06:23 conrad * #include "Configuration.h" added * * Revision 1.12 1997/03/22 08:34:37 gfa * *** empty log message *** * * Revision 1.11 1997/03/19 18:33:05 gfa * added 256 byte separatetion between data and heap (temporary) * * Revision 1.10 1997/03/16 22:17:26 gfa * implemented vmMove and vmProtect syscalls * * Revision 1.9 1997/03/14 17:20:09 gfa * first running version of vm* * * Revision 1.8 1997/03/13 23:58:17 gfa * added alloc/free/move * * Revision 1.7 1997/03/12 09:03:01 gfa * fixed stack address parameter passing bug * * Revision 1.6 1997/03/11 08:13:09 gfa * fixed vmInit * * Revision 1.5 1997/03/09 20:47:59 gfa * first version of init * * Revision 1.4 1997/02/17 14:27:04 zitzler * incomplete version * * Revision 1.3 1997/02/13 16:10:26 zitzler * cosmetics * * Revision 1.2 1997/02/13 15:49:14 zitzler * topsy conventions (tabs, etc.) 
* * Revision 1.1 1997/02/12 16:24:58 zitzler * Initial revision * */ #include "MMVirtualMemory.h" #include "Topsy.h" #include "Configuration.h" #include "Memory.h" #include "Threads.h" #include "MMMapping.h" #include "MemoryLayout.h" #include "List.h" #include "Configuration.h" #define STACKSIZE TM_DEFAULTTHREADSTACKSIZE static void vmInitRegion(List list, Address startAddr, unsigned long int size, RegionStatus status, ProtectionMode pmode, ThreadId owner); static Boolean vmFreeRegion(List list, List freeList, Region region, Address address, ThreadId owner); static Boolean vmAllocRegion(Region region, Page pages, List list, List freeList, ThreadId sender, Address* addr); static Boolean isPageInRegion(Region region, Page page, int mode); static Region getRegion(Address address, int mode, ThreadId hint); static unsigned long int roundToPageUp(unsigned long int x); static unsigned long int roundUp(unsigned long int x); static AddressSpaceDesc addressSpaces[ADDRESSSPACES]; /* the heap follows always kernel code and data. this function returns * the start address in order to allow early initialization of the heap */ Address mmVmGetHeapAddress( Address kernelDataStart, unsigned long int kernelDataSize) { return (Address)roundUp((unsigned long)kernelDataStart + kernelDataSize); } /* this is topsy's initial vm region setup. note the two stacks that are * made in advance for tmInit to startup properly. 
*/ Error mmVmInit(Address kernelCodeStart, unsigned long int kernelCodeSize, Address kernelDataStart, unsigned long int kernelDataSize, Address userCodeStart, unsigned long int userCodeSize, Address userDataStart, unsigned long int userDataSize, Address* mmStackPtr, Address* tmStackPtr) { AddressSpacePtr space; Address addr; unsigned long int size, loc; /*** setup kernel memory regions (a total of 7 initial regions) */ mmAddressSpaceRange(KERNEL, &addr, &size); space = &(addressSpaces[KERNEL]); space->regionList = listNew(); space->freeList = listNew(); space->startPage = LOGICALPAGENUMBER((unsigned long int)addr); space->endPage = roundToPageUp((unsigned long int)addr + size); /** boot/exception stack */ #warning ______________bootStackBottom_______ vmInitRegion (space->regionList, (Address)bootStackBottom, BOOTSTACKSIZE, VM_ALLOCATED, READ_WRITE_REGION, 0); /** kernel code */ vmInitRegion (space->regionList, kernelCodeStart, kernelCodeSize, VM_ALLOCATED, READ_ONLY_REGION, 0); /** kernel data */ vmInitRegion (space->regionList, kernelDataStart, kernelDataSize, VM_ALLOCATED, READ_WRITE_REGION, 0); /** heap region */ loc = (unsigned long)mmVmGetHeapAddress(kernelDataStart, kernelDataSize); vmInitRegion (space->regionList, (Address)loc, KERNELHEAPSIZE, VM_ALLOCATED, READ_WRITE_REGION, 0); /** mmStack */ loc += KERNELHEAPSIZE; vmInitRegion (space->regionList, (Address)loc, STACKSIZE, VM_ALLOCATED, READ_WRITE_REGION, 0); *mmStackPtr = (Address)loc; /** tmStack */ loc += STACKSIZE; vmInitRegion (space->regionList, (Address)loc, STACKSIZE, VM_ALLOCATED, READ_WRITE_REGION, 0); *tmStackPtr = (Address)loc; /** free region for kernel memory */ loc += STACKSIZE; vmInitRegion (space->freeList, (Address)loc, size - (loc - (unsigned long int)addr), VM_FREED, READ_WRITE_REGION, 0); /*** insert 2 regions for user code and data in users address space */ mmAddressSpaceRange(USER, &addr, &size); space = &(addressSpaces[USER]); space->regionList = listNew(); space->freeList = 
listNew(); space->startPage = LOGICALPAGENUMBER((unsigned long int)addr); space->endPage = roundToPageUp((unsigned long int)addr + size); /** user code */ vmInitRegion (space->regionList, userCodeStart, userCodeSize, VM_ALLOCATED, READ_ONLY_REGION, 0); /** user data */ vmInitRegion (space->regionList, userDataStart, userDataSize, VM_ALLOCATED, READ_WRITE_REGION, 0); /** free region for user memory */ vmInitRegion (space->freeList, (Address)roundUp((unsigned long int)userDataStart + userDataSize), size - roundUp(userDataSize) - roundUp(userCodeSize), VM_FREED, READ_WRITE_REGION, 0); return MM_VMINITOK; } Error mmVmAlloc(Address* addressPtr, unsigned long int size, ThreadId owner) { Region region; Boolean success; List list = addressSpaces[THREAD_MODE(owner)].regionList; List freeList = addressSpaces[THREAD_MODE(owner)].freeList; Page pages = roundToPageUp(size); listGetFirst(freeList, (void**)®ion); success = vmAllocRegion(region, pages, list, freeList, owner, addressPtr); if (success) return VM_ALLOCOK; while (region != NULL) { listGetNext(freeList, (void**)®ion); success = vmAllocRegion(region, pages, list, freeList, owner, addressPtr); if (success) { return VM_ALLOCOK; } } return VM_ALLOCFAILED; } Error mmVmFree(Address address, ThreadId sender) { List list, freeList; Region region; Boolean success; /* test aligned existence of region for given address */ region = getRegion(address, MM_VMEXACT, MMTHREADID); if (region == NULL) { WARNING("vmFree failed (no such region)"); return VM_FREEFAILED; } /* kernel threads are allowed to free kernel and user vm regions */ /* users may only remove their own regions */ if (THREAD_MODE(region->owner) == KERNEL) { if (THREAD_MODE(sender) == USER) { WARNING("vmFree: user tried to free kernel region"); return VM_FREEFAILED; } list = addressSpaces[KERNEL].regionList; freeList = addressSpaces[KERNEL].freeList; } else { list = addressSpaces[USER].regionList; freeList = addressSpaces[USER].freeList; } success = vmFreeRegion(list, 
freeList, region, address, sender); if (success) return VM_FREEOK; WARNING("vmFree failed (freeRegion failed)"); return VM_FREEFAILED; } Error mmVmMove(Address* addressPtr, ThreadId newOwner) { Address oldAddress = *addressPtr; Region region; Page from, to, i; Error ret; if (THREAD_MODE(newOwner) == KERNEL) { /* kernel does not want to move regions from kernel to kernel */ return VM_MOVEFAILED; } /* moving of pages for virtual memory or copying for direct mapping */ region = getRegion(oldAddress, MM_VMEXACT, MMTHREADID); if (region == NULL) { return VM_MOVEFAILED; } ret = mmVmAlloc(addressPtr, region->numOfPages * LOGICALPAGESIZE,newOwner); if (ret != VM_ALLOCOK) { return VM_MOVEFAILED; } from = LOGICALPAGENUMBER(oldAddress); to = LOGICALPAGENUMBER(*addressPtr); for (i = 0; i < region->numOfPages; i++) { if (mmMovePage(from, to) != MM_MOVEPAGEOK) { mmVmFree(addressPtr, MMTHREADID); WARNING("vmMove: page moving failed"); return VM_MOVEFAILED; } from++; to++; } mmVmFree(oldAddress, MMTHREADID); return VM_MOVEOK; } Error mmVmProtect(Address startAddr, unsigned long int size, ProtectionMode pmode, ThreadId owner) { Page page, end; Region region; if (LOGICALPAGEREMAINDER(size) != 0) return VM_PROTECTFAILED; /* pages must be in a region to protect */ region = getRegion(startAddr, MM_VMINSIDE, owner); if (region == NULL) return VM_PROTECTFAILED; /* users must own the regions */ if (THREAD_MODE(owner) == USER) { if (region->owner != owner) return VM_PROTECTFAILED; } end = LOGICALPAGENUMBER(size) + LOGICALPAGENUMBER(startAddr); for (page = LOGICALPAGENUMBER(startAddr); page < end; page++) { if (mmProtectPage(page, pmode) == MM_PROTECTPAGEFAILED) return VM_PROTECTFAILED; } return VM_PROTECTOK; } /* this kernel internal message is used to reclaim all vm resource * owned by a thread. just in case it forgets to call vmFree or it * crashes before doing it. * we assume that kernel threads do not allocate and own vmRegions * in user space. 
 */
Error mmVmCleanup(ThreadId id)
{
    List list;
    Region region;

    /* scan the region list of the thread's address space and free every
     * region it still owns.
     * NOTE(review): mmVmFree removes the region from the very list being
     * iterated; this relies on listGetNext tolerating removal of the
     * current element (hinting-list behavior) — confirm against List.c. */
    list = addressSpaces[THREAD_MODE(id)].regionList;
    listGetFirst(list, (void**)&region);
    while (region != NULL) {
        if (region->owner == id)
            mmVmFree((Address)(region->startPage * LOGICALPAGESIZE), id);
        listGetNext(list, (void**)&region);
    }
    return VM_CLEANUPOK;
}

/* does 'page' match the given region? MM_VMEXACT matches only the first
 * page of the region, MM_VMINSIDE any page within it. */
static Boolean isPageInRegion(Region region, Page page, int mode)
{
    if (mode == MM_VMEXACT) {
        if (region->startPage == page) return TRUE;
        else return FALSE;
    }
    else if (mode == MM_VMINSIDE) {
        if (page >= region->startPage &&
            page < region->startPage + region->numOfPages) return TRUE;
        else return FALSE;
    }
    /* unknown mode: no match */
    return FALSE;
}

/* find the region containing 'address' (per 'mode', see isPageInRegion).
 * 'hint' selects which address space (USER or KERNEL) is searched first;
 * both spaces are always searched. returns NULL if nothing matches, or
 * if mode is MM_VMEXACT and the address is not page-aligned. */
static Region getRegion(Address address, int mode, ThreadId hint)
{
    List list;
    AddressSpace first, then;
    Region region;
    Page page = LOGICALPAGENUMBER(address);

    if (mode == MM_VMEXACT) {
        if (LOGICALPAGEREMAINDER(address) != 0) {
            WARNING("getRegion: got unaligned address");
            return NULL;
        }
    }
    /* search the hinted space first, the other one second */
    if (THREAD_MODE(hint) == USER) {
        first = USER;
        then = KERNEL;
    }
    else {
        first = KERNEL;
        then = USER;
    }
    list = addressSpaces[first].regionList;
    listGetFirst(list, (void**)&region);
    while (region != NULL) {
        if (isPageInRegion(region, page, mode)) return region;
        listGetNext(list, (void**)&region);
    }
    list = addressSpaces[then].regionList;
    listGetFirst(list, (void**)&region);
    while (region != NULL) {
        if (isPageInRegion(region, page, mode)) return region;
        listGetNext(list, (void**)&region);
    }
    return NULL;
}

/* move 'region' (which must start exactly at 'address' and be owned by
 * 'sender', unless sender is a kernel thread) from the allocated list to
 * the free list and unmap its pages. returns TRUE on success. */
static Boolean vmFreeRegion(List list, List freeList, Region region,
                            Address address, ThreadId sender)
{
    unsigned long int page = LOGICALPAGENUMBER(address);

    if (region == NULL) return FALSE;
    if (region->startPage == page) {
        /* kernel threads may free any region; others only their own */
        if ((region->owner == sender) || (THREAD_MODE(sender) == KERNEL)) {
            listRemove(list, (void*)region, NULL);
            listAddInFront(freeList, (void*)region, NULL);
            mmUnmapPages(region->startPage, region->numOfPages);
            return TRUE;
        }
    }
    return FALSE;
}

/* allocates a region without mapping pages. pages are mapped either
 * on demand (when a paged memory model is used) or they don't need
 * to be mapped at all for a direct mapped memory model. */
static Boolean vmAllocRegion(Region region, Page pages, List list,
                             List freeList, ThreadId sender, Address* addr)
{
    Region newRegion;

    if (region == NULL) return FALSE;
    if (region->numOfPages > pages) {
        /* make new region, split free into an allocated and a free one */
        if (hmAlloc((Address*)&newRegion, sizeof(RegionDesc)) != HM_ALLOCOK)
            return FALSE;
        newRegion->owner = sender;
        newRegion->startPage = region->startPage;
        newRegion->numOfPages = pages;
        *addr = (Address)((newRegion->startPage) * LOGICALPAGESIZE);
        listAddInFront(list, newRegion, NULL); /* add new region to list */
        /* adjust free region, subtract used space */
        /* this region is already in the free list */
        region->owner = ANY;
        region->startPage += pages;
        region->numOfPages -= pages;
        return TRUE;
    }
    else if (region->numOfPages == pages) {
        /* exact fit: move the whole free region to the allocated list */
        region->owner = sender;
        *addr = (Address)((region->startPage) * LOGICALPAGESIZE);
        listRemove(freeList, region, NULL); /* changing places */
        listAddInFront(list, region, NULL);
        return TRUE;
    }
    /* free region too small */
    return FALSE;
}

/* create a region descriptor covering 'size' bytes (rounded up to whole
 * pages) from startAddr and prepend it to 'list'.
 * NOTE(review): the 'status' parameter is accepted but never stored in
 * the descriptor — confirm whether RegionDesc has a status field that
 * should be set here.
 * NOTE(review): the hmAlloc result is not checked; during init this is
 * presumably assumed to succeed — verify. */
static void vmInitRegion(List list, Address startAddr,
                         unsigned long int size, RegionStatus status,
                         ProtectionMode pmode, ThreadId owner)
{
    Region region;

    hmAlloc((Address*)&region, sizeof(RegionDesc));
    region->startPage = LOGICALPAGENUMBER((unsigned long int)startAddr);
    region->numOfPages = roundToPageUp(size);
    region->pmode = pmode;
    region->owner = owner;
    listAddInFront(list, region, NULL);
#ifdef MMDEBUG
    ioConsolePutString("vmInitRegion: ");
    ioConsolePutHexInt(region->startPage * LOGICALPAGESIZE);
    ioConsolePutString(" -- ");
    ioConsolePutHexInt(
        (region->startPage + region->numOfPages) * LOGICALPAGESIZE);
    ioConsolePutString("\n");
#endif
}

/* round address up to logical page size and return page number */
static unsigned long int roundToPageUp(unsigned long int x)
{
    if (LOGICALPAGEREMAINDER(x) > 0) return LOGICALPAGENUMBER(x) + 1;
    else return LOGICALPAGENUMBER(x);
}

/* round address up to logical page size and return rounded address */
static unsigned long int roundUp(unsigned long int x)
{
    return roundToPageUp(x) * LOGICALPAGESIZE;
}