#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "main.h"
#include "synch.h"
#include "tlb.h"
#include "disk.h"

#define DEBUG_VM 0

#if DEBUG_VM
#define DEBUG(p) { printf p; fflush(stdout); }
#else
#define DEBUG(p)
#endif

typedef int bool;
enum {false = 0, true = 1};

#define OFFSETBITS      (PAGESHIFT)                /* 12 bits in offset */
#define PAGEBITS        (32 - PAGESHIFT)           /* 20 bits in page number */
#define NUMVIRTUALPAGES (1 << PAGEBITS)            /* 2^PAGEBITS pages in virtual address space */
#define NUMPHYSPAGES    (PHYS_MEM_SIZE / PAGESIZE)
#define NUMSWAPPAGES    (MAX_BLOCK)

static semaphore_t vmsem;   /* So only one thread can be in here at a time */

/* The page table of virtual pages (VP) for the process: */
typedef struct _VPE VPE;
struct _VPE {
    unsigned vpn;          /* which virtual page we're talking about */
    bool mapped;           /* is this page mapped in the address space? */
    bool inmem;            /* is this page in memory? (if false => on disk) */
    unsigned pfn;          /* page frame in memory (valid only when inmem is true) */
    unsigned diskblock;    /* location of swap space on disk (always valid) */
    unsigned protection;   /* protection bits */
};
static VPE PageTable[NUMVIRTUALPAGES];

/* The page frame info, one record per page frame (PF): */
typedef struct _PFE PFE;
struct _PFE {
    bool full;             /* is there a page in this page frame? */
    unsigned vpn;          /* which virtual page is in here */
};
static PFE PageFrame[NUMPHYSPAGES];
int pagesfree;

/* The swap space (SS) info, one record per page on disk: */
typedef struct _SSE SSE;
struct _SSE {
    bool full;             /* is there a page in this block of swap space? */
    unsigned vpn;          /* which virtual page is in here */
};
static SSE SwapSpace[NUMSWAPPAGES];

typedef struct _Seg Seg;
struct _Seg {
    unsigned min;
    unsigned max;
};

/* Keep track of valid sections of memory: */
static Seg TextSeg;
static Seg DataSeg;
static Seg StackSeg;

/* The VM system is composed of three segments:
 *    text, data, and stack
 *
 * Before execution of the program starts, mipsi calls mt_vm_initialize
 * with the range of addresses that are valid for the text, data,
 * and stack segments.  Any virtual address needs to be in the
 * ranges specified by this call.  Any virtual address outside these
 * ranges can be considered an invalid address.  Accesses to invalid
 * addresses should cause the system to print out an error message
 * and die.
 */
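/* Illustrative sketch (not part of the original module): how a 32-bit
 * virtual address splits into a virtual page number and an offset, given
 * the bit counts defined above.  GET_PAGE(), used below, is assumed to
 * perform the same shift.
 */
#if 0
static unsigned example_vpn(unsigned vaddr)    { return vaddr >> PAGESHIFT; }     /* upper PAGEBITS bits */
static unsigned example_offset(unsigned vaddr) { return vaddr & (PAGESIZE - 1); } /* lower OFFSETBITS bits */
/* Recombining the two pieces reproduces the original address:
 *   vaddr == (example_vpn(vaddr) << PAGESHIFT) | example_offset(vaddr)
 */
#endif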
/* initialize the segments for the VM system */
void mt_vm_initialize(unsigned min_text_addr, unsigned max_text_addr,
                      unsigned min_stack_addr, unsigned max_stack_addr,
                      unsigned min_data_addr, unsigned max_data_addr)
{
    unsigned i;

    DEBUG(("mt_vm_initialize: Initializing VM system\n"));
    DEBUG((" %d virtual pages, %d physical pages, %d swap pages\n",
           NUMVIRTUALPAGES, NUMPHYSPAGES, NUMSWAPPAGES));
    DEBUG((" Text: 0x%08x to 0x%08x\n", min_text_addr, max_text_addr));
    DEBUG((" Data: 0x%08x to 0x%08x\n", min_data_addr, max_data_addr));
    DEBUG((" Stack: 0x%08x to 0x%08x\n", min_stack_addr, max_stack_addr));

    /* Initialize binary semaphore for use as mutex */
    vmsem = semaphore_create();
    semaphore_initialize(vmsem, 1);

    /* Initialize page table */
    for (i = 0; i < NUMVIRTUALPAGES; i++) {
        PageTable[i].vpn = i;
        PageTable[i].mapped = false;
    }

    /* Initialize page frame table */
    for (i = 0; i < NUMPHYSPAGES; i++) {
        PageFrame[i].full = false;
    }
    pagesfree = NUMPHYSPAGES;
    assert(pagesfree >= 0);

    /* Initialize swap space table */
    for (i = 0; i < NUMSWAPPAGES; i++) {
        SwapSpace[i].full = false;
    }

    /* Keep track of valid ranges of virtual addresses: */
    TextSeg.min = min_text_addr;
    TextSeg.max = max_text_addr;
    DataSeg.min = min_data_addr;
    DataSeg.max = max_data_addr;
    StackSeg.min = min_stack_addr;
    StackSeg.max = max_stack_addr;
}

bool page_valid(unsigned vpage)
{
    /* Make sure the vpage is valid for text, data, or stack addresses. */
    if ( (GET_PAGE(TextSeg.min) <= vpage) && (GET_PAGE(TextSeg.max) >= vpage) )
        return true;
    if ( (GET_PAGE(DataSeg.min) <= vpage) && (GET_PAGE(DataSeg.max) >= vpage) )
        return true;
    if ( (GET_PAGE(StackSeg.min) <= vpage) && (GET_PAGE(StackSeg.max) >= vpage) )
        return true;
    return false;
}

bool page_mapped(unsigned vpage)
{
    return PageTable[vpage].mapped;
}

bool page_inmem(unsigned vpage)
{
    return PageTable[vpage].inmem;
}
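/* Usage sketch (not part of the original module): the three predicates above
 * are what the fault handler at the bottom of this file uses to classify a
 * reference.  The helper name and printed wording are illustrative only.
 */
#if 0
static void classify_reference(unsigned vpage)
{
    if (!page_valid(vpage))
        printf("VPN %u: illegal address\n", vpage);
    else if (!page_mapped(vpage))
        printf("VPN %u: first touch, handled by map_page()\n", vpage);
    else if (!page_inmem(vpage))
        printf("VPN %u: on disk, handled by page_fault()\n", vpage);
    else
        printf("VPN %u: in memory, plain TLB miss handled by tlb_miss()\n", vpage);
}
#endif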
unsigned pick_victim()
{
    /* Pick a page frame to kick out and return it.
       Uses the random algorithm. */
    unsigned pvictim;

    pvictim = rand() % NUMPHYSPAGES;
    assert( (pvictim >= 0) && (pvictim < NUMPHYSPAGES) );
    DEBUG(("pick_victim: picked PFN %d\n", pvictim));
    return pvictim;
}

void page_out(unsigned vpage)
{
    /* Page out a page from memory to swap space */
    unsigned physaddr, ppage, diskblock;

    ppage = PageTable[vpage].pfn;
    physaddr = (ppage) << PAGESHIFT;
    diskblock = PageTable[vpage].diskblock;

    DEBUG(("page_out: paging out VPN %d, PFN %d to disk block %d\n",
           vpage, ppage, diskblock));

    assert(PageTable[vpage].vpn == vpage);
    assert(PageTable[vpage].mapped == true);
    assert(PageTable[vpage].inmem == true);
    assert(PageFrame[ppage].full == true);
    assert(PageFrame[ppage].vpn == vpage);
    assert(SwapSpace[diskblock].full == true);
    assert(SwapSpace[diskblock].vpn == vpage);

    /* Write the block out to disk */
    WriteBlock(diskblock, physaddr);

    /* Invalidate any TLB entries for this page: */
    tlb_invalidate(vpage);

    /* Do the bookkeeping */
    PageTable[vpage].inmem = false;
    PageFrame[ppage].full = false;
    assert(pagesfree >= 0);
    pagesfree++;
}

void page_in(unsigned vpage)
{
    /* Page in a page from swap space to memory */
    unsigned physaddr, ppage, diskblock;

    ppage = PageTable[vpage].pfn;
    physaddr = (ppage) << PAGESHIFT;
    diskblock = PageTable[vpage].diskblock;

    DEBUG(("page_in: paging in VPN %d, PFN %d from disk block %d\n",
           vpage, ppage, diskblock));

    assert(PageTable[vpage].vpn == vpage);
    assert(PageTable[vpage].mapped == true);
    assert(PageTable[vpage].inmem == false);
    assert(PageFrame[ppage].full == false);
    assert(SwapSpace[diskblock].full == true);
    assert(SwapSpace[diskblock].vpn == vpage);

    /* Read in the block from disk */
    ReadBlock(diskblock, physaddr);

    /* Do the bookkeeping */
    PageTable[vpage].inmem = true;
    PageFrame[ppage].full = true;
    PageFrame[ppage].vpn = vpage;
    assert(pagesfree >= 0);
    pagesfree--;
    assert(pagesfree >= 0);
}

unsigned get_free_phys_page()
{
    /* If there is no free page frame, pick a victim and kick it out.
       Then find a free page frame and return its location. */
    unsigned freepage, i;

    DEBUG(("get_free_phys_page: Finding a free page. %d pages free.\n",
           pagesfree));

    /* If there is no free page frame, pick a victim, kick it out */
    assert(pagesfree >= 0);
    if (pagesfree == 0) {
        unsigned victim;
        victim = pick_victim();
        page_out(PageFrame[victim].vpn);
    }

    /* Now find a free page frame, mark it used, and return its location. */

    /* find a free page */
    assert(pagesfree > 0);
    for (i = 0; i < NUMPHYSPAGES; i++)
        if (!PageFrame[i].full) {
            freepage = i;
            break;  /* out of for loop */
        }
    assert(i < NUMPHYSPAGES);
    assert(PageFrame[freepage].full == false);

    DEBUG(("get_free_phys_page: PFN %d is free\n", freepage));

    /* return its location */
    return freepage;
}
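/* Consistency-check sketch (not part of the original module): the routines
 * above and below maintain the invariant that pagesfree equals the number of
 * empty entries in PageFrame[].  A debugging helper like this, called after
 * page_in(), page_out(), or map_page(), is one way to verify it.
 */
#if 0
static void check_pagesfree(void)
{
    unsigned i, empty = 0;
    for (i = 0; i < NUMPHYSPAGES; i++)
        if (!PageFrame[i].full)
            empty++;
    assert(empty == (unsigned) pagesfree);
}
#endif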
unsigned get_free_swap_page()
{
    /* Find a free block in swap space and return its location. */
    int i;
    unsigned freepage;

    /* find a free block */
    for (i = 0; i < NUMSWAPPAGES; i++)
        if (!SwapSpace[i].full) {
            freepage = i;
            break;  /* out of for loop */
        }
    if (i == NUMSWAPPAGES) {
        error(Fatal, "Swap space full!\n");
    }
    assert(SwapSpace[freepage].full == false);

    DEBUG(("get_free_swap_page: Swap Space block %d is free\n", freepage));

    /* return its location */
    return freepage;
}

PTE map_page(unsigned vpage)
{
    /* Map the given page into the address space:
         1) find a free page in memory
         2) find a free page on disk
         3) create the page table entry
         4) update page frame, swap space tables
         5) return a tlb entry
     */
    unsigned ppage, swappage;
    VPE *pte;
    PTE tlbe;

    assert(page_valid(vpage));
    assert(PageTable[vpage].mapped == false);

    DEBUG(("map_page: Mapping VPN %d (addr 0x%x)\n", vpage, vpage << PAGESHIFT));

    /* find a free page in swap space */
    swappage = get_free_swap_page();

    /* find a free page in memory */
    ppage = get_free_phys_page();

    /* Update page frame table */
    assert(pagesfree >= 0);
    pagesfree--;
    assert(pagesfree >= 0);
    PageFrame[ppage].full = true;
    PageFrame[ppage].vpn = vpage;

    /* Update swap space table */
    SwapSpace[swappage].full = true;
    SwapSpace[swappage].vpn = vpage;

    /* create the page table entry */
    pte = &PageTable[vpage];
    pte->vpn = vpage;
    pte->mapped = true;
    pte->inmem = true;
    pte->pfn = ppage;
    pte->diskblock = swappage;
    pte->protection = 0;

    DEBUG(("map_page: Mapped VPN %d (addr 0x%x) to PFN %d, disk block %d\n",
           pte->vpn, (pte->vpn) << PAGESHIFT, pte->pfn, pte->diskblock));

    tlbe = TLB_MAKE_PTE(pte->pfn, pte->protection);
    return tlbe;
}

PTE page_fault(unsigned vpage)
{
    /* Bring the given virtual page into memory so the program can use it.
       Update the page table and other data structures, and return a TLB
       entry. */
    unsigned ppage, diskblock, flags;
    PTE tlbe;

    DEBUG(("page_fault: handling page fault for VPN %d (addr 0x%x)\n",
           vpage, vpage << PAGESHIFT));

    assert(PageTable[vpage].mapped == true);
    assert(PageTable[vpage].inmem == false);

    /* find a free page in memory to move the page into */
    ppage = get_free_phys_page();
    /* paging in will mark the page frame used and keep the free page frame
       count current */
    assert(PageFrame[ppage].full == false);

    /* we already know the disk block where the page is coming from */
    diskblock = PageTable[vpage].diskblock;
    assert(SwapSpace[diskblock].full == true);
    assert(SwapSpace[diskblock].vpn == vpage);

    /* Map the page frame into the page table entry.  We can do this before
       reading in the data: the page isn't in the TLB yet, so no other thread
       can use it. */
    PageTable[vpage].pfn = ppage;

    /* page into this free page */
    page_in(vpage);

    /* make sure all is groovy */
    assert(PageTable[vpage].vpn == vpage);
    assert(PageTable[vpage].mapped == true);
    assert(PageTable[vpage].inmem == true);
    assert(PageTable[vpage].pfn == ppage);
    assert(PageFrame[ppage].full == true);
    assert(PageFrame[ppage].vpn == vpage);
    assert(SwapSpace[diskblock].full == true);
    assert(SwapSpace[diskblock].vpn == vpage);

    DEBUG(("page_fault: VPN %d (addr 0x%x) brought into PFN %d from disk block %d\n",
           vpage, vpage << PAGESHIFT, PageTable[vpage].pfn, PageTable[vpage].diskblock));

    /* return a TLB entry */
    flags = PageTable[vpage].protection;
    tlbe = TLB_MAKE_PTE(ppage, flags);
    return tlbe;
}
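/* Walk-through sketch (not part of the original module): the states a single
 * virtual page moves through under the routines above, checked with the
 * predicates defined earlier.  map_page() handles the first touch,
 * page_out() handles eviction, and page_fault() brings the page back in.
 */
#if 0
static void lifecycle_example(unsigned vpage)
{
    assert(page_valid(vpage) && !page_mapped(vpage));

    (void) map_page(vpage);      /* first touch: allocate a frame and a swap block */
    assert(page_mapped(vpage) && page_inmem(vpage));

    page_out(vpage);             /* eviction: copy the frame to its swap block */
    assert(page_mapped(vpage) && !page_inmem(vpage));

    (void) page_fault(vpage);    /* next touch: read it back into some free frame */
    assert(page_mapped(vpage) && page_inmem(vpage));
}
#endif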
PTE tlb_miss(unsigned vpage)
{
    /* Handle a TLB miss for a page that's in memory.  Simply look up the
       page table entry and return it. */
    unsigned ppage, flags;
    PTE tlbe;

    ppage = PageTable[vpage].pfn;
    flags = PageTable[vpage].protection;

    DEBUG(("tlb_miss: TLB miss for VPN %d (addr 0x%x) resolved to PFN %d\n",
           vpage, vpage << PAGESHIFT, ppage));

    tlbe = TLB_MAKE_PTE(ppage, flags);
    return tlbe;
}

/* Convert the virtual page to a physical page and set the flags.
 * If a page fault occurs, handle it.
 */
PTE mt_tlb_translate_fault(unsigned long vpage)
{
    PTE tlbe;

    DEBUG(("TLB translate fault for VPN %d (addr 0x%x)\n",
           vpage, vpage << PAGESHIFT));

    // semaphore_P(vmsem);

    /* check to see if the page number is valid */
    if (page_valid(vpage)) {
        assert(page_valid(vpage));

        /* check to see if we have a mapping for the page yet */
        if (page_mapped(vpage)) {
            /* the page is mapped into the address space already */
            /* check to see if it's in memory or on disk */
            if (page_inmem(vpage)) {
                /* page is in memory, so we just had a TLB miss */
                assert( page_valid(vpage) && page_mapped(vpage) && page_inmem(vpage));
                tlbe = tlb_miss(vpage);
            } else {
                /* page is not in memory, so we just had a page fault */
                assert( page_valid(vpage) && page_mapped(vpage) && !page_inmem(vpage));
                tlbe = page_fault(vpage);
            }
        } else {
            /* No storage for this page yet, so allocate some */
            assert( page_valid(vpage) && !page_mapped(vpage));
            tlbe = map_page(vpage);
        }
    } else {
        assert(!page_valid(vpage));
        error(Fatal,
              "mt_tlb_translate_fault: Illegal virtual page number: %d (addr 0x%x)\n",
              vpage, vpage << PAGESHIFT);
    }

    // semaphore_V(vmsem);

    return tlbe;
}

/* A write fault occurred at this virtual page.  The expected return
 * value is a new value for the protection flags for this virtual
 * page.
 */
unsigned long mt_tlb_write_fault(unsigned long vpage)
{
    /* I do nothing fancy here because I don't care about distinguishing
       dirty from clean pages. */
    unsigned flags;

    // semaphore_P(vmsem);
    flags = PageTable[vpage].protection;
    // semaphore_V(vmsem);

    return flags;
}
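/* Caller-side sketch (not part of the original module; the real mipsi call
 * sites are not shown in this file): on a TLB miss the simulator is expected
 * to ask this module for a translation and install the returned entry for the
 * faulting page.  The helper name and the TLB-install step are hypothetical.
 */
#if 0
static void on_tlb_miss(unsigned long vaddr)
{
    unsigned long vpage = vaddr >> PAGESHIFT;
    PTE entry = mt_tlb_translate_fault(vpage);
    /* ... install `entry` in the simulated TLB for `vpage` ... */
    (void) entry;
}
#endif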