root/sys/mem/vm.c (Prex 0.9.0)

DEFINITIONS

This source file includes the following definitions.
  1. vm_allocate
  2. do_allocate
  3. vm_free
  4. do_free
  5. vm_attribute
  6. do_attribute
  7. vm_map
  8. do_map
  9. vm_create
  10. vm_terminate
  11. vm_dup
  12. do_dup
  13. vm_switch
  14. vm_reference
  15. vm_load
  16. vm_translate
  17. vm_info
  18. vm_init
  19. seg_init
  20. seg_create
  21. seg_delete
  22. seg_lookup
  23. seg_alloc
  24. seg_free
  25. seg_reserve

/*-
 * Copyright (c) 2005-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * vm.c - virtual memory allocator
 */

/*
 * Each task owns a private virtual address space, and all threads
 * in a task share that address space. When a new task is created,
 * the address mapping of the parent task is copied to the child
 * task's map; at that point, read-only areas are shared with the
 * parent's map rather than copied.
 *
 * Since this kernel never pages memory out to backing storage, an
 * allocated area is guaranteed to stay resident and physically
 * contiguous. This keeps the kernel and drivers very simple.
 */

#include <kernel.h>
#include <kmem.h>
#include <thread.h>
#include <page.h>
#include <task.h>
#include <sched.h>
#include <hal.h>
#include <vm.h>

/* forward declarations */
static void        seg_init(struct seg *);
static struct seg *seg_create(struct seg *, vaddr_t, size_t);
static void        seg_delete(struct seg *, struct seg *);
static struct seg *seg_lookup(struct seg *, vaddr_t, size_t);
static struct seg *seg_alloc(struct seg *, size_t);
static void        seg_free(struct seg *, struct seg *);
static struct seg *seg_reserve(struct seg *, vaddr_t, size_t);
static int         do_allocate(vm_map_t, void **, size_t, int);
static int         do_free(vm_map_t, void *);
static int         do_attribute(vm_map_t, void *, int);
static int         do_map(vm_map_t, void *, size_t, void **);
static vm_map_t    do_dup(vm_map_t);


static struct vm_map    kernel_map;     /* vm mapping for kernel */

/**
 * vm_allocate - allocate zero-filled memory at the specified address
 *
 * If the "anywhere" argument is true, the "addr" argument is
 * ignored and the address of a free area is found automatically.
 *
 * The allocated area is writable and user-accessible by default.
 * The "addr" and "size" arguments are adjusted to page boundaries.
 */
int
vm_allocate(task_t task, void **addr, size_t size, int anywhere)
{
        int error;
        void *uaddr;

        sched_lock();

        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (copyin(addr, &uaddr, sizeof(uaddr))) {
                sched_unlock();
                return EFAULT;
        }
        if (anywhere == 0 && !user_area(uaddr)) {
                sched_unlock();
                return EACCES;
        }

        error = do_allocate(task->map, &uaddr, size, anywhere);
        if (!error) {
                if (copyout(&uaddr, addr, sizeof(uaddr)))
                        error = EFAULT;
        }
        sched_unlock();
        return error;
}
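
/*
 * Usage sketch added for illustration; it is not part of the
 * original source.  A user task normally reaches vm_allocate()
 * through the libprex system-call stubs; task_self() is assumed
 * to return a handle for the calling task.
 */
#if 0
static int
alloc_example(void)
{
        void *buf = NULL;
        int error;

        /* Let the kernel choose the address ("anywhere" != 0).
         * The memory comes back page-aligned and zero-filled. */
        error = vm_allocate(task_self(), &buf, 4096, 1);
        if (error)
                return error;           /* ENOMEM, EPERM, ... */

        /* ... use buf ... */

        return vm_free(task_self(), buf);
}
#endif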

static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
        struct seg *seg;
        vaddr_t start, end;
        paddr_t pa;

        if (size == 0)
                return EINVAL;
        if (map->total + size >= MAXMEM)
                return ENOMEM;

        /*
         * Allocate segment
         */
        if (anywhere) {
                size = round_page(size);
                if ((seg = seg_alloc(&map->head, size)) == NULL)
                        return ENOMEM;
        } else {
                start = trunc_page((vaddr_t)*addr);
                end = round_page((vaddr_t)*addr + size);
                size = (size_t)(end - start);

                if ((seg = seg_reserve(&map->head, start, size)) == NULL)
                        return ENOMEM;
        }
        seg->flags = SEG_READ | SEG_WRITE;

        /*
         * Allocate physical pages, and map them into the virtual
         * address space.
         */
        if ((pa = page_alloc(size)) == 0)
                goto err1;

        if (mmu_map(map->pgd, pa, seg->addr, size, PG_WRITE))
                goto err2;

        seg->phys = pa;

        /* Zero fill */
        memset(ptokv(pa), 0, seg->size);
        *addr = (void *)seg->addr;
        map->total += size;
        return 0;

 err2:
        page_free(pa, size);
 err1:
        seg_free(&map->head, seg);
        return ENOMEM;
}
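
/*
 * Worked example added for illustration (not part of the original
 * source): with 4 KB pages, a request for 0x20 bytes at address
 * 0x103010 is widened to whole pages.
 */
#if 0
        start = trunc_page(0x103010);           /* 0x103000 */
        end   = round_page(0x103010 + 0x20);    /* 0x104000 */
        size  = end - start;                    /* 0x1000: one page */
#endif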

/*
 * Deallocate the memory segment at the specified address.
 *
 * The "addr" argument points to a memory segment previously
 * allocated through a call to vm_allocate() or vm_map(). The
 * number of bytes freed is the size of the allocated segment.
 * If the previous or the next segment is free, the freed
 * segment is merged with it to form a larger free segment.
 */
int
vm_free(task_t task, void *addr)
{
        int error;

        sched_lock();
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_free(task->map, addr);

        sched_unlock();
        return error;
}

static int
do_free(vm_map_t map, void *addr)
{
        struct seg *seg;
        vaddr_t va;

        va = trunc_page((vaddr_t)addr);

        /*
         * Find the target segment.
         */
        seg = seg_lookup(&map->head, va, 1);
        if (seg == NULL || seg->addr != va || (seg->flags & SEG_FREE))
                return EINVAL;

        /*
         * Unmap pages of the segment.
         */
        mmu_map(map->pgd, seg->phys, seg->addr, seg->size, PG_UNMAP);

        /*
         * Relinquish the pages unless they are shared or mapped
         * from another task.
         */
        if (!(seg->flags & SEG_SHARED) && !(seg->flags & SEG_MAPPED))
                page_free(seg->phys, seg->size);

        map->total -= seg->size;
        seg_free(&map->head, seg);

        return 0;
}

/*
 * Change the attribute of the segment at the specified virtual
 * address.
 *
 * The "addr" argument points to a memory segment previously
 * allocated through a call to vm_allocate(). The attribute can
 * be any combination of PROT_READ and PROT_WRITE.
 * Note: PROT_EXEC is not supported yet.
 */
int
vm_attribute(task_t task, void *addr, int attr)
{
        int error;

        sched_lock();
        if (attr == 0 || attr & ~(PROT_READ | PROT_WRITE)) {
                sched_unlock();
                return EINVAL;
        }
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != curtask && !task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_attribute(task->map, addr, attr);

        sched_unlock();
        return error;
}
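
/*
 * Usage sketch added for illustration (not part of the original
 * source): revoke and restore write access on a segment that was
 * allocated with vm_allocate().
 */
#if 0
static int
attribute_example(task_t task, void *buf)
{
        int error;

        /* Revoke write access; "buf" must be the start address of
         * the segment. */
        error = vm_attribute(task, buf, PROT_READ);
        if (error)
                return error;

        /* Restore read/write access. */
        return vm_attribute(task, buf, PROT_READ | PROT_WRITE);
}
#endif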

static int
do_attribute(vm_map_t map, void *addr, int attr)
{
        struct seg *seg;
        int new_flags, map_type;
        paddr_t old_pa, new_pa;
        vaddr_t va;

        va = trunc_page((vaddr_t)addr);

        /*
         * Find the target segment.
         */
        seg = seg_lookup(&map->head, va, 1);
        if (seg == NULL || seg->addr != va || (seg->flags & SEG_FREE)) {
                return EINVAL;  /* not allocated */
        }
        /*
         * The attribute of a mapped segment cannot be changed.
         */
        if (seg->flags & SEG_MAPPED)
                return EINVAL;

        /*
         * Compare the new and old flags.
         */
        new_flags = 0;
        if (seg->flags & SEG_WRITE) {
                if (!(attr & PROT_WRITE))
                        new_flags = SEG_READ;
        } else {
                if (attr & PROT_WRITE)
                        new_flags = SEG_READ | SEG_WRITE;
        }
        if (new_flags == 0)
                return 0;       /* same attribute */

        map_type = (new_flags & SEG_WRITE) ? PG_WRITE : PG_READ;

        /*
         * If it is a shared segment, duplicate it so the change
         * does not affect the other maps sharing the pages.
         */
        if (seg->flags & SEG_SHARED) {

                old_pa = seg->phys;

                /* Allocate new physical page. */
                if ((new_pa = page_alloc(seg->size)) == 0)
                        return ENOMEM;

                /* Copy source page */
                memcpy(ptokv(new_pa), ptokv(old_pa), seg->size);

                /* Map new segment */
                if (mmu_map(map->pgd, new_pa, seg->addr, seg->size,
                            map_type)) {
                        page_free(new_pa, seg->size);
                        return ENOMEM;
                }
                seg->phys = new_pa;

                /* Unlink from shared list */
                seg->sh_prev->sh_next = seg->sh_next;
                seg->sh_next->sh_prev = seg->sh_prev;
                if (seg->sh_prev == seg->sh_next)
                        seg->sh_prev->flags &= ~SEG_SHARED;
                seg->sh_next = seg->sh_prev = seg;
        } else {
                if (mmu_map(map->pgd, seg->phys, seg->addr, seg->size,
                            map_type))
                        return ENOMEM;
        }
        seg->flags = new_flags;
        return 0;
}

/**
 * vm_map - map another task's memory into the current task.
 *
 * Note: This routine does not support mapping at a specific address.
 */
int
vm_map(task_t target, void *addr, size_t size, void **alloc)
{
        int error;

        sched_lock();
        if (!task_valid(target)) {
                sched_unlock();
                return ESRCH;
        }
        if (target == curtask) {
                sched_unlock();
                return EINVAL;
        }
        if (!task_capable(CAP_EXTMEM)) {
                sched_unlock();
                return EPERM;
        }
        if (!user_area(addr)) {
                sched_unlock();
                return EFAULT;
        }

        error = do_map(target->map, addr, size, alloc);

        sched_unlock();
        return error;
}
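
/*
 * Usage sketch added for illustration (not part of the original
 * source): a server holding CAP_EXTMEM maps a client's buffer into
 * its own address space.  "client" and "cbuf" are hypothetical: a
 * task handle and an address valid within that task.
 */
#if 0
static int
map_example(task_t client, void *cbuf, size_t len)
{
        void *local;
        int error;

        /* "local" receives the address in the current task; the
         * page offset of cbuf is preserved. */
        error = vm_map(client, cbuf, len, &local);
        if (error)
                return error;

        /* ... access the client's memory through "local" ... */

        return vm_free(task_self(), local);
}
#endif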

static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
        struct seg *seg, *cur, *tgt;
        vm_map_t curmap;
        vaddr_t start, end;
        paddr_t pa;
        size_t offset;
        int map_type;
        void *tmp;

        if (size == 0)
                return EINVAL;
        if (map->total + size >= MAXMEM)
                return ENOMEM;

        /* check fault */
        tmp = NULL;
        if (copyout(&tmp, alloc, sizeof(tmp)))
                return EFAULT;

        start = trunc_page((vaddr_t)addr);
        end = round_page((vaddr_t)addr + size);
        size = (size_t)(end - start);
        offset = (size_t)((vaddr_t)addr - start);

        /*
         * Find the segment that includes the target address.
         */
        seg = seg_lookup(&map->head, start, size);
        if (seg == NULL || (seg->flags & SEG_FREE))
                return EINVAL;  /* not allocated */
        tgt = seg;

        /*
         * Find a free segment in the current task.
         */
        curmap = curtask->map;
        if ((seg = seg_alloc(&curmap->head, size)) == NULL)
                return ENOMEM;
        cur = seg;

        /*
         * Try to map it into the current task's memory.
         */
        if (tgt->flags & SEG_WRITE)
                map_type = PG_WRITE;
        else
                map_type = PG_READ;

        pa = tgt->phys + (paddr_t)(start - tgt->addr);
        if (mmu_map(curmap->pgd, pa, cur->addr, size, map_type)) {
                seg_free(&curmap->head, seg);
                return ENOMEM;
        }

        cur->flags = tgt->flags | SEG_MAPPED;
        cur->phys = pa;

        tmp = (void *)(cur->addr + offset);
        copyout(&tmp, alloc, sizeof(tmp));

        curmap->total += size;
        return 0;
}

/*
 * Create a new virtual memory space.
 * No memory is inherited.
 *
 * Must be called with the scheduler locked.
 */
vm_map_t
vm_create(void)
{
        struct vm_map *map;

        /* Allocate new map structure */
        if ((map = kmem_alloc(sizeof(*map))) == NULL)
                return NULL;

        map->refcnt = 1;
        map->total = 0;

        /* Allocate new page directory */
        if ((map->pgd = mmu_newmap()) == NO_PGD) {
                kmem_free(map);
                return NULL;
        }
        seg_init(&map->head);
        return map;
}

/*
 * Terminate the specified virtual memory space.
 * This is called when a task is terminated.
 */
void
vm_terminate(vm_map_t map)
{
        struct seg *seg, *tmp;

        if (--map->refcnt > 0)
                return;

        sched_lock();
        seg = &map->head;
        do {
                if (seg->flags != SEG_FREE) {
                        /* Unmap segment */
                        mmu_map(map->pgd, seg->phys, seg->addr,
                                seg->size, PG_UNMAP);

                        /*
                         * Free the pages unless they are shared or
                         * mapped from another task.
                         */
                        if (!(seg->flags & SEG_SHARED) &&
                            !(seg->flags & SEG_MAPPED)) {
                                page_free(seg->phys, seg->size);
                        }
                }
                tmp = seg;
                seg = seg->next;
                seg_delete(&map->head, tmp);
        } while (seg != &map->head);

        if (map == curtask->map) {
                /*
                 * Switch to the kernel page directory before
                 * deleting the current page directory.
                 */
                mmu_switch(kernel_map.pgd);
        }

        mmu_terminate(map->pgd);
        kmem_free(map);
        sched_unlock();
}

/*
 * Duplicate the specified virtual memory space.
 * This is called when a new task is created.
 *
 * Returns the new map, or NULL on failure.
 *
 * All segments of the original memory map are copied to the new
 * memory map. Read-only, executable, or shared segments need not
 * be copied; they are physically shared with the original map.
 */
vm_map_t
vm_dup(vm_map_t org_map)
{
        vm_map_t new_map;

        sched_lock();
        new_map = do_dup(org_map);
        sched_unlock();
        return new_map;
}

static vm_map_t
do_dup(vm_map_t org_map)
{
        vm_map_t new_map;
        struct seg *tmp, *src, *dest;
        int map_type;

        if ((new_map = vm_create()) == NULL)
                return NULL;

        new_map->total = org_map->total;
        /*
         * Copy all segments
         */
        tmp = &new_map->head;
        src = &org_map->head;

        /*
         * Copy the head segment
         */
        *tmp = *src;
        tmp->next = tmp->prev = tmp;

        if (src == src->next)   /* only the head segment? */
                return new_map;

        do {
                ASSERT(src != NULL);
                ASSERT(src->next != NULL);

                if (src == &org_map->head) {
                        dest = tmp;
                } else {
                        /* Create new segment struct */
                        dest = kmem_alloc(sizeof(*dest));
                        if (dest == NULL)
                                return NULL;

                        *dest = *src;   /* memcpy */

                        dest->prev = tmp;
                        dest->next = tmp->next;
                        tmp->next->prev = dest;
                        tmp->next = dest;
                        tmp = dest;
                }
                if (src->flags == SEG_FREE) {
                        /*
                         * Skip free segment
                         */
                } else {
                        /* Check if the segment can be shared */
                        if (!(src->flags & SEG_WRITE) &&
                            !(src->flags & SEG_MAPPED)) {
                                dest->flags |= SEG_SHARED;
                        }

                        if (!(dest->flags & SEG_SHARED)) {
                                /* Allocate new physical page. */
                                dest->phys = page_alloc(src->size);
                                if (dest->phys == 0)
                                        return NULL;

                                /* Copy source page */
                                memcpy(ptokv(dest->phys), ptokv(src->phys),
                                       src->size);
                        }
                        /* Map the segment to virtual address */
                        if (dest->flags & SEG_WRITE)
                                map_type = PG_WRITE;
                        else
                                map_type = PG_READ;

                        if (mmu_map(new_map->pgd, dest->phys, dest->addr,
                                    dest->size, map_type))
                                return NULL;
                }
                src = src->next;
        } while (src != &org_map->head);

        /*
         * No error so far. Now link all shared segments between
         * the two maps.
         */
        dest = &new_map->head;
        src = &org_map->head;
        do {
                if (dest->flags & SEG_SHARED) {
                        src->flags |= SEG_SHARED;
                        dest->sh_prev = src;
                        dest->sh_next = src->sh_next;
                        src->sh_next->sh_prev = dest;
                        src->sh_next = dest;
                }
                dest = dest->next;
                src = src->next;
        } while (src != &org_map->head);
        return new_map;
}
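
/*
 * Editor's sketch (not part of the original source): the per-segment
 * sharing policy that do_dup() applies above, written as a predicate.
 * A segment's pages can be shared with the child only if the segment
 * is not writable and is not a mapping of another task's memory.
 */
#if 0
static int
can_share(const struct seg *seg)
{
        return !(seg->flags & SEG_WRITE) && !(seg->flags & SEG_MAPPED);
}
#endif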

/*
 * Switch the VM mapping.
 *
 * Since the kernel task does not have a user-mode memory image,
 * we do not have to set up its page directory. Thus, the idle
 * thread and interrupt threads can be switched to quickly.
 */
void
vm_switch(vm_map_t map)
{

        if (map != &kernel_map)
                mmu_switch(map->pgd);
}

/*
 * Increment the reference count of a VM mapping.
 */
int
vm_reference(vm_map_t map)
{

        map->refcnt++;
        return 0;
}
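
/*
 * Editor's sketch (not part of the original source): how the
 * reference count ties vm_reference() and vm_terminate() together.
 */
#if 0
        vm_reference(map);      /* refcnt: 1 -> 2 */
        vm_terminate(map);      /* refcnt: 2 -> 1, map is kept */
        vm_terminate(map);      /* refcnt: 1 -> 0, map is destroyed */
#endif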

/*
 * Load the task image for a boot task.
 * Returns 0 on success, or an errno value on failure.
 */
int
vm_load(vm_map_t map, struct module *mod, void **stack)
{
        char *src;
        void *text, *data;
        int error;

        DPRINTF(("Loading task: %s\n", mod->name));

        /*
         * We have to switch the VM mapping to touch the virtual
         * memory space of the target task without a page fault.
         */
        vm_switch(map);

        src = ptokv(mod->phys);
        text = (void *)mod->text;
        data = (void *)mod->data;

        /*
         * Create text segment
         */
        error = do_allocate(map, &text, mod->textsz, 0);
        if (error)
                return error;
        memcpy(text, src, mod->textsz);
        error = do_attribute(map, text, PROT_READ);
        if (error)
                return error;

        /*
         * Create data & BSS segment
         */
        if (mod->datasz + mod->bsssz != 0) {
                error = do_allocate(map, &data, mod->datasz + mod->bsssz, 0);
                if (error)
                        return error;
                if (mod->datasz > 0) {
                        src = src + (mod->data - mod->text);
                        memcpy(data, src, mod->datasz);
                }
        }
        /*
         * Create stack
         */
        *stack = (void *)USRSTACK;
        error = do_allocate(map, stack, DFLSTKSZ, 0);
        if (error)
                return error;

        /* Free original pages */
        page_free(mod->phys, mod->size);
        return 0;
}

/*
 * Translate a virtual address in the current task to a physical
 * address.
 * Returns the physical address on success, or 0 if the memory is
 * not mapped.
 */
paddr_t
vm_translate(vaddr_t addr, size_t size)
{

        return mmu_extract(curtask->map->pgd, addr, size);
}
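
/*
 * Editor's sketch (not part of the original source): because this
 * kernel never pages memory out and allocations are physically
 * contiguous, a single translation stays valid for a whole buffer,
 * which is what makes simple DMA setup possible.  The DMA engine
 * here is hypothetical.
 */
#if 0
static int
dma_example(void *buf, size_t len)
{
        paddr_t pa;

        pa = vm_translate((vaddr_t)buf, len);
        if (pa == 0)
                return EFAULT;          /* not mapped */

        /* ... program the DMA engine with pa/len; the whole range
         * is physically contiguous ... */
        return 0;
}
#endif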

int
vm_info(struct vminfo *info)
{
        u_long target = info->cookie;
        task_t task = info->task;
        u_long i;
        vm_map_t map;
        struct seg *seg;

        sched_lock();
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        map = task->map;
        seg = &map->head;
        i = 0;
        do {
                if (i++ == target) {
                        info->cookie = i;
                        info->virt = seg->addr;
                        info->size = seg->size;
                        info->flags = seg->flags;
                        info->phys = seg->phys;
                        sched_unlock();
                        return 0;
                }
                seg = seg->next;
        } while (seg != &map->head);
        sched_unlock();
        return ESRCH;
}
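
/*
 * Usage sketch added for illustration (not part of the original
 * source): vm_info() carries its iteration state in info->cookie,
 * so repeated calls enumerate every segment of a task's map.
 */
#if 0
static void
dump_map(task_t task)
{
        struct vminfo info;

        info.cookie = 0;
        info.task = task;
        while (vm_info(&info) == 0)
                DPRINTF(("va=%08lx size=%08lx flags=%x\n",
                         (long)info.virt, (long)info.size, info.flags));
}
#endif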

void
vm_init(void)
{
        pgd_t pgd;

        /*
         * Set up the VM mapping for the kernel task.
         */
        if ((pgd = mmu_newmap()) == NO_PGD)
                panic("vm_init");
        kernel_map.pgd = pgd;
        mmu_switch(pgd);

        seg_init(&kernel_map.head);
        kernel_task.map = &kernel_map;
}


/*
 * Initialize the segment list with a single free segment that
 * covers the whole user address range.
 */
static void
seg_init(struct seg *seg)
{

        seg->next = seg->prev = seg;
        seg->sh_next = seg->sh_prev = seg;
        seg->addr = PAGE_SIZE;
        seg->phys = 0;
        seg->size = USERLIMIT - PAGE_SIZE;
        seg->flags = SEG_FREE;
}
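
/*
 * Editor's sketch (not part of the original source): the invariants
 * that hold right after seg_init().  The list is circular, and page 0
 * is left out of the allocatable range, presumably so that a NULL
 * dereference can never hit valid memory.
 */
#if 0
        ASSERT(seg->next == seg && seg->prev == seg);
        ASSERT(seg->flags == SEG_FREE);
        ASSERT(seg->addr == PAGE_SIZE);
        ASSERT(seg->size == USERLIMIT - PAGE_SIZE);
#endif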

/*
 * Create a new free segment after the specified segment.
 * Returns the new segment on success, or NULL on failure.
 */
static struct seg *
seg_create(struct seg *prev, vaddr_t addr, size_t size)
{
        struct seg *seg;

        if ((seg = kmem_alloc(sizeof(*seg))) == NULL)
                return NULL;

        seg->addr = addr;
        seg->size = size;
        seg->phys = 0;
        seg->flags = SEG_FREE;
        seg->sh_next = seg->sh_prev = seg;

        seg->next = prev->next;
        seg->prev = prev;
        prev->next->prev = seg;
        prev->next = seg;

        return seg;
}

/*
 * Delete the specified segment.
 */
static void
seg_delete(struct seg *head, struct seg *seg)
{

        /*
         * If it is a shared segment, unlink it from the shared list.
         */
        if (seg->flags & SEG_SHARED) {
                seg->sh_prev->sh_next = seg->sh_next;
                seg->sh_next->sh_prev = seg->sh_prev;
                if (seg->sh_prev == seg->sh_next)
                        seg->sh_prev->flags &= ~SEG_SHARED;
        }
        if (head != seg)
                kmem_free(seg);
}

/*
 * Find the segment that contains the specified address range.
 */
static struct seg *
seg_lookup(struct seg *head, vaddr_t addr, size_t size)
{
        struct seg *seg;

        seg = head;
        do {
                if (seg->addr <= addr &&
                    seg->addr + seg->size >= addr + size) {
                        return seg;
                }
                seg = seg->next;
        } while (seg != head);
        return NULL;
}

/*
 * Allocate a free segment of the specified size (first fit).
 */
static struct seg *
seg_alloc(struct seg *head, size_t size)
{
        struct seg *seg;

        seg = head;
        do {
                if ((seg->flags & SEG_FREE) && seg->size >= size) {
                        if (seg->size != size) {
                                /*
                                 * Split this segment and return its head.
                                 */
                                if (seg_create(seg,
                                               seg->addr + size,
                                               seg->size - size) == NULL)
                                        return NULL;
                        }
                        seg->size = size;
                        return seg;
                }
                seg = seg->next;
        } while (seg != head);
        return NULL;
}

/*
 * Free the specified segment, merging it with adjacent free
 * segments where possible.
 */
static void
seg_free(struct seg *head, struct seg *seg)
{
        struct seg *prev, *next;

        ASSERT(seg->flags != SEG_FREE);

        /*
         * If it is a shared segment, unlink it from the shared
         * list before the flags are cleared.
         */
        if (seg->flags & SEG_SHARED) {
                seg->sh_prev->sh_next = seg->sh_next;
                seg->sh_next->sh_prev = seg->sh_prev;
                if (seg->sh_prev == seg->sh_next)
                        seg->sh_prev->flags &= ~SEG_SHARED;
        }
        seg->flags = SEG_FREE;

        /*
         * If the next segment is free, merge with it.
         */
        next = seg->next;
        if (next != head && (next->flags & SEG_FREE)) {
                seg->next = next->next;
                next->next->prev = seg;
                seg->size += next->size;
                kmem_free(next);
        }
        /*
         * If the previous segment is free, merge with it.
         */
        prev = seg->prev;
        if (seg != head && (prev->flags & SEG_FREE)) {
                prev->next = seg->next;
                seg->next->prev = prev;
                prev->size += seg->size;
                kmem_free(seg);
        }
}

/*
 * Reserve a segment at the specified address and size.
 */
static struct seg *
seg_reserve(struct seg *head, vaddr_t addr, size_t size)
{
        struct seg *seg, *prev, *next;
        size_t diff;

        /*
         * Find the free segment that contains the specified range.
         */
        seg = seg_lookup(head, addr, size);
        if (seg == NULL || !(seg->flags & SEG_FREE))
                return NULL;

        /*
         * Split off the leading part if the range does not start
         * at the head of the segment.
         */
        prev = NULL;
        if (seg->addr != addr) {
                prev = seg;
                diff = (size_t)(addr - seg->addr);
                seg = seg_create(prev, addr, prev->size - diff);
                if (seg == NULL)
                        return NULL;
                prev->size = diff;
        }
        /*
         * Split off the trailing part if the range is shorter than
         * the segment.
         */
        if (seg->size != size) {
                next = seg_create(seg, seg->addr + size, seg->size - size);
                if (next == NULL) {
                        if (prev) {
                                /* Undo the previous seg_create() */
                                seg_free(head, seg);
                        }
                        return NULL;
                }
                seg->size = size;
        }
        seg->flags = 0;
        return seg;
}
