Prex 0.9.0

root/sys/mem/vm_nommu.c


DEFINITIONS

This source file includes the following definitions.
  1. vm_allocate
  2. do_allocate
  3. vm_free
  4. do_free
  5. vm_attribute
  6. do_attribute
  7. vm_map
  8. do_map
  9. vm_create
  10. vm_terminate
  11. vm_dup
  12. vm_switch
  13. vm_reference
  14. vm_load
  15. vm_translate
  16. vm_info
  17. vm_init
  18. seg_init
  19. seg_create
  20. seg_delete
  21. seg_lookup
  22. seg_alloc
  23. seg_free
  24. seg_reserve

/*-
 * Copyright (c) 2005-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * vm_nommu.c - virtual memory allocator for no-MMU systems
 */

/*
 * When the platform does not support a memory management unit (MMU),
 * all virtual memory is mapped directly to physical memory, so the
 * memory space is shared among all tasks and the kernel.
 *
 * Important: The lists of segments are not sorted by address.
 */

#include <kernel.h>
#include <kmem.h>
#include <thread.h>
#include <page.h>
#include <task.h>
#include <sched.h>
#include <vm.h>

/* forward declarations */
static void        seg_init(struct seg *);
static struct seg *seg_create(struct seg *, vaddr_t, size_t);
static void        seg_delete(struct seg *, struct seg *);
static struct seg *seg_lookup(struct seg *, vaddr_t, size_t);
static struct seg *seg_alloc(struct seg *, size_t);
static void        seg_free(struct seg *, struct seg *);
static struct seg *seg_reserve(struct seg *, vaddr_t, size_t);
static int         do_allocate(vm_map_t, void **, size_t, int);
static int         do_free(vm_map_t, void *);
static int         do_attribute(vm_map_t, void *, int);
static int         do_map(vm_map_t, void *, size_t, void **);


static struct vm_map    kernel_map;     /* vm mapping for kernel */

/**
 * vm_allocate - allocate zero-filled memory for specified address
 *
 * If the "anywhere" argument is true, the "addr" argument will be
 * ignored.  In this case, the address of a free area will be found
 * automatically.
 *
 * The allocated area has the writable, user-access attribute by
 * default.  The "addr" and "size" arguments will be adjusted to
 * page boundaries.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
        int error;
        void *uaddr;

        sched_lock();

        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (copyin(addr, &uaddr, sizeof(*addr))) {
                sched_unlock();
                return EFAULT;
        }
        if (anywhere == 0 && !user_area(*addr)) {
                sched_unlock();
                return EACCES;
        }

        error = do_allocate(task->map, &uaddr, size, anywhere);
        if (!error) {
                if (copyout(&uaddr, addr, sizeof(uaddr)))
                        error = EFAULT;
        }
        sched_unlock();
        return error;
}
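
/*
 * Usage sketch (illustrative, not part of the original file): a task
 * would typically allocate an "anywhere" region and later release it
 * with vm_free().  task_self() and the error handling below are
 * caller-side assumptions.
 *
 *      void *buf = NULL;
 *
 *      if (vm_allocate(task_self(), &buf, 4096, 1) == 0) {
 *              ... use the zero-filled, page-aligned buffer ...
 *              vm_free(task_self(), buf);
 *      }
 */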

static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
        struct seg *seg;
        vaddr_t start, end;

        if (size == 0)
                return EINVAL;
        if (map->total + size >= MAXMEM)
                return ENOMEM;

        /*
         * Allocate segment, and reserve pages for it.
         */
        if (anywhere) {
                size = round_page(size);
                if ((seg = seg_alloc(&map->head, size)) == NULL)
                        return ENOMEM;
                start = seg->addr;
        } else {
                start = trunc_page((vaddr_t)*addr);
                end = round_page(start + size);
                size = (size_t)(end - start);

                if ((seg = seg_reserve(&map->head, start, size)) == NULL)
                        return ENOMEM;
        }
        seg->flags = SEG_READ | SEG_WRITE;

        /* Zero fill */
        memset((void *)start, 0, size);
        *addr = (void *)seg->addr;
        map->total += size;
        return 0;
}
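
/*
 * Worked example for the fixed-address path above, assuming 4 KB pages
 * (the actual page size is platform dependent): a request for
 * addr = 0x80012345 and size = 100 becomes
 *
 *      start = trunc_page(0x80012345)        = 0x80012000
 *      end   = round_page(0x80012000 + 100)  = 0x80013000
 *      size  = end - start                   = 0x1000
 *
 * so exactly one page is reserved and zero-filled.
 */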

/*
 * Deallocate memory segment for specified address.
 *
 * The "addr" argument points to a memory segment previously
 * allocated through a call to vm_allocate() or vm_map(). The
 * number of bytes freed is the number of bytes of the
 * allocated segment.  If the previous or the next segment is
 * free, the freed segment is merged with it to build a larger
 * free segment.
 */
int
vm_free(task_t task, void *addr)
{
        int error;

        sched_lock();
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_free(task->map, addr);

        sched_unlock();
        return error;
}

static int
do_free(vm_map_t map, void *addr)
{
        struct seg *seg;
        vaddr_t va;

        va = trunc_page((vaddr_t)addr);

        /*
         * Find the target segment.
         */
        seg = seg_lookup(&map->head, va, 1);
        if (seg == NULL || seg->addr != va || (seg->flags & SEG_FREE))
                return EINVAL;  /* not allocated */

        /*
         * Relinquish use of the page if it is neither shared nor mapped.
         */
        if (!(seg->flags & SEG_SHARED) && !(seg->flags & SEG_MAPPED))
                page_free(seg->phys, seg->size);

        map->total -= seg->size;
        seg_free(&map->head, seg);

        return 0;
}

/*
 * Change the attribute of the specified virtual address.
 *
 * The "addr" argument points to a memory segment previously
 * allocated through a call to vm_allocate(). The attribute can
 * be any combination of PROT_READ and PROT_WRITE.
 * Note: PROT_EXEC is not supported yet.
 */
int
vm_attribute(task_t task, void *addr, int attr)
{
        int error;

        sched_lock();
        if (attr == 0 || attr & ~(PROT_READ | PROT_WRITE)) {
                sched_unlock();
                return EINVAL;
        }
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_attribute(task->map, addr, attr);

        sched_unlock();
        return error;
}
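
/*
 * Usage sketch (illustrative only): a task could drop write access on a
 * previously allocated region; "buf" and task_self() are caller-side
 * assumptions.
 *
 *      vm_attribute(task_self(), buf, PROT_READ);
 *
 * Note that on a no-MMU system this only updates the segment flags in
 * do_attribute() below; there is no hardware protection to enforce it.
 */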

static int
do_attribute(vm_map_t map, void *addr, int attr)
{
        struct seg *seg;
        int new_flags = 0;
        vaddr_t va;

        va = trunc_page((vaddr_t)addr);

        /*
         * Find the target segment.
         */
        seg = seg_lookup(&map->head, va, 1);
        if (seg == NULL || seg->addr != va || (seg->flags & SEG_FREE)) {
                return EINVAL;  /* not allocated */
        }
        /*
         * The attribute of the mapped or shared segment can not be changed.
         */
        if ((seg->flags & SEG_MAPPED) || (seg->flags & SEG_SHARED))
                return EINVAL;

        /*
         * Check new and old flag.
         */
        if (seg->flags & SEG_WRITE) {
                if (!(attr & PROT_WRITE))
                        new_flags = SEG_READ;
        } else {
                if (attr & PROT_WRITE)
                        new_flags = SEG_READ | SEG_WRITE;
        }
        if (new_flags == 0)
                return 0;       /* same attribute */
        seg->flags = new_flags;
        return 0;
}

/**
 * vm_map - map another task's memory into the current task.
 *
 * Note: This routine does not support mapping at a specific
 * address.
 */
int
vm_map(task_t target, void *addr, size_t size, void **alloc)
{
        int error;

        sched_lock();
        if (!task_valid(target)) {
                sched_unlock();
                return ESRCH;
        }
        if (target == curtask) {
                sched_unlock();
                return EINVAL;
        }
        if (!task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_map(target->map, addr, size, alloc);

        sched_unlock();
        return error;
}
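
/*
 * Usage sketch (illustrative only): a task holding CAP_EXTMEM can make
 * another task's buffer visible in its own map.  "child" and
 * "remote_buf" are hypothetical caller-side names.
 *
 *      void *local;
 *
 *      if (vm_map(child, remote_buf, len, &local) == 0)
 *              ... access the shared data through "local" ...
 *
 * On a no-MMU system "local" receives the same address as "remote_buf";
 * do_map() below only records the mapping in the current task's map.
 */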

static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
        struct seg *seg, *tgt;
        vm_map_t curmap;
        vaddr_t start, end;
        void *tmp;

        if (size == 0)
                return EINVAL;
        if (map->total + size >= MAXMEM)
                return ENOMEM;

        /* check fault */
        tmp = NULL;
        if (copyout(&tmp, alloc, sizeof(tmp)))
                return EFAULT;

        start = trunc_page((vaddr_t)addr);
        end = round_page((vaddr_t)addr + size);
        size = (size_t)(end - start);

        /*
         * Find the segment that includes target address
         */
        seg = seg_lookup(&map->head, start, size);
        if (seg == NULL || (seg->flags & SEG_FREE))
                return EINVAL;  /* not allocated */
        tgt = seg;

        /*
         * Create new segment to map
         */
        curmap = curtask->map;
        if ((seg = seg_create(&curmap->head, start, size)) == NULL)
                return ENOMEM;
        seg->flags = tgt->flags | SEG_MAPPED;

        copyout(&addr, alloc, sizeof(addr));

        curmap->total += size;
        return 0;
}

/*
 * Create new virtual memory space.
 * No memory is inherited.
 * Must be called with scheduler locked.
 */
vm_map_t
vm_create(void)
{
        struct vm_map *map;

        /* Allocate new map structure */
        if ((map = kmem_alloc(sizeof(*map))) == NULL)
                return NULL;

        map->refcnt = 1;
        map->total = 0;

        seg_init(&map->head);
        return map;
}

/*
 * Terminate the specified virtual memory space.
 * This is called when a task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
        struct seg *seg, *tmp;

        if (--map->refcnt > 0)
                return;

        sched_lock();
        seg = &map->head;
        do {
                if (seg->flags != SEG_FREE) {
                        /* Free segment if it is neither shared nor mapped */
                        if (!(seg->flags & SEG_SHARED) &&
                            !(seg->flags & SEG_MAPPED)) {
                                page_free(seg->phys, seg->size);
                        }
                }
                tmp = seg;
                seg = seg->next;
                seg_delete(&map->head, tmp);
        } while (seg != &map->head);

        kmem_free(map);
        sched_unlock();
}

/*
 * Duplicate the specified virtual memory space.
 */
vm_map_t
vm_dup(vm_map_t org_map)
{
        /*
         * This function is not supported on no-MMU systems.
         */
        return NULL;
}

/*
 * Switch VM mapping.
 */
void
vm_switch(vm_map_t map)
{
}

/*
 * Increment reference count of VM mapping.
 */
int
vm_reference(vm_map_t map)
{

        map->refcnt++;
        return 0;
}
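
/*
 * A reference taken here must be released with vm_terminate(); the map
 * is only torn down when the last reference is dropped.  A hypothetical
 * caller keeping a map alive across an operation would pair the calls:
 *
 *      vm_reference(map);
 *      ... use map ...
 *      vm_terminate(map);      releases the map only when refcnt hits 0
 */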

/*
 * Set up the task image for a boot task (no-MMU version).
 * Returns 0 on success, or an errno on failure.
 *
 * Note: We assume that the task images are already copied to
 * the proper address by a boot loader.
 */
int
vm_load(vm_map_t map, struct module *mod, void **stack)
{
        struct seg *seg;
        vaddr_t base, start, end;
        size_t size;

        DPRINTF(("Loading task:\'%s\'\n", mod->name));

        /*
         * Reserve text & data area
         */
        base = mod->text;
        size = mod->textsz + mod->datasz + mod->bsssz;
        if (size == 0)
                return EINVAL;

        start = trunc_page(base);
        end = round_page(start + size);
        size = (size_t)(end - start);

        if ((seg = seg_create(&map->head, start, size)) == NULL)
                return ENOMEM;

        seg->flags = SEG_READ | SEG_WRITE;

        if (mod->bsssz != 0)
                memset((void *)(mod->data + mod->datasz), 0, mod->bsssz);

        /*
         * Create stack
         */
        return do_allocate(map, stack, DFLSTKSZ, 1);
}

/*
 * Translate virtual address of current task to physical address.
 * Returns physical address on success, or NULL if no mapped memory.
 */
paddr_t
vm_translate(vaddr_t addr, size_t size)
{

        return (paddr_t)addr;
}
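
/*
 * On a no-MMU system the translation is the identity mapping: for any
 * virtual address va, vm_translate(va, size) simply returns (paddr_t)va,
 * so (illustrative only) a buffer obtained from vm_allocate() could be
 * handed to hardware that expects a physical address without further
 * conversion.
 */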

int
vm_info(struct vminfo *info)
{
        u_long target = info->cookie;
        task_t task = info->task;
        u_long i;
        vm_map_t map;
        struct seg *seg;

        sched_lock();
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        map = task->map;
        seg = &map->head;
        i = 0;
        do {
                if (i++ == target) {
                        info->cookie = i;
                        info->virt = seg->addr;
                        info->size = seg->size;
                        info->flags = seg->flags;
                        info->phys = seg->phys;
                        sched_unlock();
                        return 0;
                }
                seg = seg->next;
        } while (seg != &map->head);
        sched_unlock();
        return ESRCH;
}
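
/*
 * Usage sketch (illustrative only): the cookie works as an iteration
 * index, so a caller can walk all segments of a task by reissuing the
 * call with the returned cookie until ESRCH comes back:
 *
 *      struct vminfo vi;
 *
 *      vi.task = task;
 *      vi.cookie = 0;
 *      while (vm_info(&vi) == 0)
 *              ... examine vi.virt, vi.size, vi.flags, vi.phys ...
 */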

void
vm_init(void)
{

        seg_init(&kernel_map.head);
        kernel_task.map = &kernel_map;
}

/*
 * Initialize segment.
 */
static void
seg_init(struct seg *seg)
{

        seg->next = seg->prev = seg;
        seg->sh_next = seg->sh_prev = seg;
        seg->addr = 0;
        seg->phys = 0;
        seg->size = 0;
        seg->flags = SEG_FREE;
}

/*
 * Create new free segment after the specified segment.
 * Returns segment on success, or NULL on failure.
 */
static struct seg *
seg_create(struct seg *prev, vaddr_t addr, size_t size)
{
        struct seg *seg;

        if ((seg = kmem_alloc(sizeof(*seg))) == NULL)
                return NULL;

        seg->addr = addr;
        seg->size = size;
        seg->phys = (paddr_t)addr;
        seg->flags = SEG_FREE;
        seg->sh_next = seg->sh_prev = seg;

        seg->next = prev->next;
        seg->prev = prev;
        prev->next->prev = seg;
        prev->next = seg;

        return seg;
}
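
/*
 * The segment list is circular and doubly linked, anchored at the map's
 * head.  Inserting after "prev" turns
 *
 *      prev <-> next        into        prev <-> seg <-> next
 *
 * and a freshly created map consists of the head alone, linked to
 * itself (see seg_init() above).
 */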

/*
 * Delete specified segment.
 */
static void
seg_delete(struct seg *head, struct seg *seg)
{

        /*
         * If it is shared segment, unlink from shared list.
         */
        if (seg->flags & SEG_SHARED) {
                seg->sh_prev->sh_next = seg->sh_next;
                seg->sh_next->sh_prev = seg->sh_prev;
                if (seg->sh_prev == seg->sh_next)
                        seg->sh_prev->flags &= ~SEG_SHARED;
        }
        if (head != seg)
                kmem_free(seg);
}

/*
 * Find the segment that contains the specified address range.
 */
static struct seg *
seg_lookup(struct seg *head, vaddr_t addr, size_t size)
{
        struct seg *seg;

        seg = head;
        do {
                if (seg->addr <= addr &&
                    seg->addr + seg->size >= addr + size) {
                        return seg;
                }
                seg = seg->next;
        } while (seg != head);
        return NULL;
}

/*
 * Allocate free segment for specified size.
 */
static struct seg *
seg_alloc(struct seg *head, size_t size)
{
        struct seg *seg;
        paddr_t pa;

        if ((pa = page_alloc(size)) == 0)
                return NULL;

        if ((seg = seg_create(head, (vaddr_t)pa, size)) == NULL) {
                page_free(pa, size);
                return NULL;
        }
        return seg;
}

/*
 * Free the specified segment.
 */
static void
seg_free(struct seg *head, struct seg *seg)
{
        ASSERT(seg->flags != SEG_FREE);

        /*
         * If it is shared segment, unlink from shared list.
         */
        if (seg->flags & SEG_SHARED) {
                seg->sh_prev->sh_next = seg->sh_next;
                seg->sh_next->sh_prev = seg->sh_prev;
                if (seg->sh_prev == seg->sh_next)
                        seg->sh_prev->flags &= ~SEG_SHARED;
        }
        seg->prev->next = seg->next;
        seg->next->prev = seg->prev;

        kmem_free(seg);
}

/*
 * Reserve the segment at the specified address/size.
 */
static struct seg *
seg_reserve(struct seg *head, vaddr_t addr, size_t size)
{
        struct seg *seg;
        paddr_t pa;

        pa = (paddr_t)addr;

        if (page_reserve(pa, size) != 0)
                return NULL;

        if ((seg = seg_create(head, (vaddr_t)pa, size)) == NULL) {
                page_free(pa, size);
                return NULL;
        }
        return seg;
}
