Prex Home / Browse Source - Prex Version: 0.9.0

root/bsp/hal/x86/arch/mmu.c


DEFINITIONS

This source file includes the following definitions.
  1. mmu_map
  2. mmu_newmap
  3. mmu_terminate
  4. mmu_switch
  5. mmu_extract
  6. mmu_init

/*-
 * Copyright (c) 2005-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * mmu.c - memory management unit support routines
 */

/*
 * This module provides virtual/physical address translation for
 * the Intel x86 MMU.  The kernel performs only page-level translation
 * and protection; it does not use the x86 segmentation mechanism.
 */

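/*
 * Illustrative sketch (not part of the original file): how a 32-bit
 * linear address is split by the two-level x86 page tables that this
 * module manages.  The PAGE_DIR() and PAGE_TABLE() macros from mmu.h
 * are assumed to perform equivalent extractions; the helper names
 * below are hypothetical and shown for explanation only.
 */
#if 0
static uint32_t
example_pde_index(vaddr_t va)
{
        return (uint32_t)(va >> 22);            /* bits 31..22: page directory index */
}

static uint32_t
example_pte_index(vaddr_t va)
{
        return (uint32_t)((va >> 12) & 0x3ff);  /* bits 21..12: page table index */
}
/* Bits 11..0 are the byte offset within the 4KB page. */
#endif
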
#include <machine/syspage.h>
#include <kernel.h>
#include <page.h>
#include <mmu.h>
#include <cpu.h>
#include <cpufunc.h>

/*
 * Boot page directory.
 * This works as a template for all page directories.
 */
static pgd_t boot_pgd = (pgd_t)BOOT_PGD;

/*
 * Map a physical memory range into a virtual address range.
 *
 * Returns 0 on success, or ENOMEM on failure.
 *
 * The map type can be one of the following:
 *   PG_UNMAP  - Remove mapping
 *   PG_READ   - Read only mapping
 *   PG_WRITE  - Read/write allowed
 *   PG_SYSTEM - Kernel page
 *   PG_IOMEM  - I/O memory
 *
 * Set up the appropriate page tables for the mapping. If there is
 * no page table for the specified address, a new page table is
 * allocated.
 *
 * This routine does not return an error even if the specified
 * address has already been mapped to another physical address;
 * in that case it simply overwrites the existing mapping.
 *
 * To unmap pages, PG_UNMAP is specified as the type.  However,
 * the page tables themselves are not released even if they no
 * longer contain any valid page entries. All page tables are
 * released by mmu_terminate() when the task is terminated.
 *
 * TODO: The TLB should be flushed for the specific page with
 * invlpg on i486 and later processors.
 */
int
mmu_map(pgd_t pgd, paddr_t pa, vaddr_t va, size_t size, int type)
{
        uint32_t pte_flag = 0;
        uint32_t pde_flag = 0;
        pte_t pte;
        paddr_t pg;

        pa = round_page(pa);
        va = round_page(va);
        size = trunc_page(size);

        /*
         * Set page flag
         */
        switch (type) {
        case PG_UNMAP:
                pte_flag = 0;
                pde_flag = (uint32_t)(PDE_PRESENT | PDE_WRITE | PDE_USER);
                break;
        case PG_READ:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_USER);
                pde_flag = (uint32_t)(PDE_PRESENT | PDE_WRITE | PDE_USER);
                break;
        case PG_WRITE:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WRITE | PTE_USER);
                pde_flag = (uint32_t)(PDE_PRESENT | PDE_WRITE | PDE_USER);
                break;
        case PG_SYSTEM:
                pde_flag = (uint32_t)(PDE_PRESENT | PDE_WRITE);
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WRITE);
                break;
        case PG_IOMEM:
                pde_flag = (uint32_t)(PDE_PRESENT | PDE_WRITE);
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WRITE | PTE_NCACHE);
                break;
        default:
                panic("mmu_map");
        }
        /*
         * Map all pages
         */
        while (size > 0) {
                if (pte_present(pgd, va)) {
                        /* Page table already exists for the address */
                        pte = vtopte(pgd, va);
                } else {
                        ASSERT(pte_flag != 0);
                        if ((pg = page_alloc(PAGE_SIZE)) == 0) {
                                DPRINTF(("Error: MMU mapping failed\n"));
                                return ENOMEM;
                        }
                        pgd[PAGE_DIR(va)] = (uint32_t)pg | pde_flag;
                        pte = (pte_t)ptokv(pg);
                        memset(pte, 0, PAGE_SIZE);
                }
                /* Set new entry into page table */
                pte[PAGE_TABLE(va)] = (uint32_t)pa | pte_flag;

                /* Process next page */
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        flush_tlb();
        return 0;
}
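
/*
 * Usage sketch (hypothetical, not part of the original source): a
 * platform driver could map one page of memory-mapped I/O registers
 * into the kernel page directory like this.  The physical and virtual
 * addresses are made-up example values.
 */
#if 0
        if (mmu_map(boot_pgd, (paddr_t)0xfee00000, (vaddr_t)0xbee00000,
                    PAGE_SIZE, PG_IOMEM) != 0)
                panic("example: can not map I/O registers");
#endif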

/*
 * Create a new page map.
 *
 * Returns a page directory on success, or NO_PGD on failure.  This
 * routine is called when a new task is created.  Every page map must
 * contain the same kernel page tables, so the kernel part of the boot
 * page directory is copied into the newly created map.
 */
pgd_t
mmu_newmap(void)
{
        paddr_t pg;
        pgd_t pgd;
        int i;

        /* Allocate page directory */
        if ((pg = page_alloc(PAGE_SIZE)) == 0)
                return NO_PGD;
        pgd = (pgd_t)ptokv(pg);
        memset(pgd, 0, PAGE_SIZE);

        /* Copy the kernel page table entries (each PDE is 32 bits wide) */
        i = PAGE_DIR(KERNBASE);
        memcpy(&pgd[i], &boot_pgd[i], (size_t)(1024 - i) * sizeof(uint32_t));
        return pgd;
}

/*
 * Terminate all page mappings.
 */
void
mmu_terminate(pgd_t pgd)
{
        int i;
        pte_t pte;

        flush_tlb();

        /* Release all user page tables */
        for (i = 0; i < PAGE_DIR(KERNBASE); i++) {
                pte = (pte_t)pgd[i];
                if (pte != 0)
                        page_free((paddr_t)((paddr_t)pte & PTE_ADDRESS),
                                  PAGE_SIZE);
        }
        /* Release page directory */
        page_free(kvtop(pgd), PAGE_SIZE);
}

/*
 * Switch to a new page directory.
 *
 * This is called when the context is switched.
 * The whole TLB is flushed automatically when the
 * CR3 register is loaded.
 */
void
mmu_switch(pgd_t pgd)
{
        uint32_t phys = (uint32_t)kvtop(pgd);

        if (phys != get_cr3())
                set_cr3(phys);
}
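
/*
 * Usage sketch (hypothetical, not part of the original source): the
 * typical life cycle of a task's page directory using the routines
 * above.  The user address, the allocation of "user_page_pa" and the
 * exact teardown order are illustrative assumptions, not code taken
 * from the Prex task manager.
 */
#if 0
        pgd_t pgd;
        paddr_t user_page_pa;

        /* Create a new map; it shares the kernel page tables with boot_pgd. */
        if ((pgd = mmu_newmap()) == NO_PGD)
                panic("example: out of memory");

        /* Back one user page with physical memory and map it read/write. */
        if ((user_page_pa = page_alloc(PAGE_SIZE)) == 0 ||
            mmu_map(pgd, user_page_pa, (vaddr_t)0x08048000,
                    PAGE_SIZE, PG_WRITE) != 0)
                panic("example: mmu_map failed");

        /* Activate the map; normally done by the context switch code. */
        mmu_switch(pgd);

        /* When the task exits, switch away and release all page tables. */
        mmu_switch(boot_pgd);
        mmu_terminate(pgd);
#endif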

/*
 * Returns the physical address for the specified virtual address.
 * This routine checks whether the whole virtual area actually exists;
 * it returns 0 if at least one page in the range is not mapped.
 */
paddr_t
mmu_extract(pgd_t pgd, vaddr_t va, size_t size)
{
        pte_t pte;
        vaddr_t start, end, pg;
        paddr_t pa;

        start = trunc_page(va);
        end = trunc_page(va + size - 1);

        /* Check all pages exist */
        for (pg = start; pg <= end; pg += PAGE_SIZE) {
                if (!pte_present(pgd, pg))
                        return 0;
                pte = vtopte(pgd, pg);
                if (!page_present(pte, pg))
                        return 0;
        }

        /* Get physical address */
        pte = vtopte(pgd, start);
        pa = (paddr_t)ptetopg(pte, start);
        return pa + (paddr_t)(va - start);
}
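
/*
 * Usage sketch (hypothetical, not part of the original source): a
 * driver that needs the physical address of a task buffer, e.g. to
 * program a DMA controller, could validate and translate it in one
 * step.  "buf", "len" and the EFAULT return are illustrative
 * assumptions.
 */
#if 0
        paddr_t pa;

        if ((pa = mmu_extract(pgd, (vaddr_t)buf, len)) == 0)
                return EFAULT;  /* some page in the range is not mapped */
        /* "pa" now holds the physical address corresponding to "buf". */
#endif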

/*
 * Initialize the MMU.
 *
 * Paging has already been enabled in locore.S, and physical addresses
 * 0-4MB have already been mapped into kernel space there.  Now all of
 * physical memory is mapped into the kernel virtual address space as
 * a straight 1:1 mapping.  User-mode access is not allowed for these
 * kernel pages.
 * page_init() must be called before calling this routine.
 *
 * Note: 4KB of page table is needed to map 4MB of memory, so if the
 * system has a lot of RAM, the memory used by the kernel grows
 * accordingly.  For example, the page tables for 512MB of system RAM
 * require 512KB.
 */
void
mmu_init(struct mmumap *mmumap_table)
{
        struct mmumap *map;
        int map_type = 0;

        for (map = mmumap_table; map->type != 0; map++) {
                switch (map->type) {
                case VMT_RAM:
                case VMT_ROM:
                case VMT_DMA:
                        map_type = PG_SYSTEM;
                        break;
                case VMT_IO:
                        map_type = PG_IOMEM;
                        break;
                }

                if (mmu_map(boot_pgd, map->phys, map->virt,
                            (size_t)map->size, map_type))
                        panic("mmu_init");
        }
}
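
/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * the shape of an mmumap table as consumed by mmu_init() above.  The
 * field names follow the accesses in mmu_init(); the address, size and
 * initializer style are example values only, not the real platform
 * table.  I/O regions would be added as additional VMT_IO entries.
 */
#if 0
static struct mmumap example_mmumap[] = {
        /* 64MB of RAM mapped straight into kernel space */
        { .virt = 0x80000000, .phys = 0x00000000,
          .size = 0x04000000, .type = VMT_RAM },
        /* terminator: mmu_init() stops at type == 0 */
        { .type = 0 },
};
#endif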
