Skip to content

Commit

Permalink
fix(vm/mman): handle memory entry chunks splitting properly and munmap
Browse files Browse the repository at this point in the history
of larger chunks

Do not perform amap_getanons for entry splitting.
Handle munmap of chunks larger than SIZE_PAGE properly.

JIRA: RTOS-893
  • Loading branch information
badochov committed Aug 21, 2024
1 parent fa2cb46 commit f9c1ba8
Showing 1 changed file with 65 additions and 44 deletions.
109 changes: 65 additions & 44 deletions vm/map.c
Original file line number Diff line number Diff line change
Expand Up @@ -370,12 +370,28 @@ void *vm_mapFind(vm_map_t *map, void *vaddr, size_t size, u8 flags, u8 prot)
}


/*
 * Copies an entry's contents from `src` into `dst` and takes the extra
 * references needed for `dst` to co-own the shared amap and backing object.
 *
 * refAnons: when non-zero, additionally references the individual anons
 * covered by the entry (needed when both entries stay live and share the
 * anons, as in vm_mapCopy). When an entry is merely being split, the anons
 * just change owner, so no extra anon references are taken.
 */
static void vm_mapEntryCopy(map_entry_t *dst, map_entry_t *src, int refAnons)
{
	hal_memcpy(dst, src, sizeof(map_entry_t));
	/* Reference on behalf of `dst`: write through `dst`, never `src` — the
	 * source entry must not be mutated by a copy. (amap_ref returns the amap
	 * it was given, per the original pre-split implementation.) */
	dst->amap = amap_ref(src->amap);
	/* In case of splitting the entry the anons shouldn't be referenced again,
	 * as they just change the owner. */
	if (refAnons != 0) {
		amap_getanons(dst->amap, dst->aoffs, dst->size);
	}
	dst->object = vm_objectRef(src->object);
}


int _vm_munmap(vm_map_t *map, void *vaddr, size_t size)
{
long offs;
ptr_t pvaddr;
map_entry_t *e, *s;
map_entry_t t;
process_t *proc = proc_current()->process;
size_t overlapEOffset;
size_t overlapSize;
ptr_t overlapStart, overlapEnd;
int putEntry;

t.vaddr = vaddr;
t.size = size;
Expand All @@ -387,74 +403,85 @@ int _vm_munmap(vm_map_t *map, void *vaddr, size_t size)
if (e == NULL) {
break;
}

#ifdef NOMMU
proc = e->process;
#endif
overlapStart = max((ptr_t)e->vaddr, (ptr_t)vaddr);
overlapEnd = min((ptr_t)e->vaddr + e->size, (ptr_t)vaddr + size);
overlapSize = (size_t)(overlapEnd - overlapStart);
overlapEOffset = (size_t)(overlapStart - (ptr_t)e->vaddr);

/* Note: what if NEEDS_COPY? */
amap_putanons(e->amap, (e->aoffs + vaddr) - e->vaddr, size);

for (offs = (vaddr - e->vaddr); offs < ((vaddr + size) - e->vaddr); offs += SIZE_PAGE) {
pmap_remove(&map->pmap, e->vaddr + offs);
}
putEntry = 0;

if (e->vaddr == vaddr) {
if (e->size == size) {
_entry_put(map, e);
if ((ptr_t)e->vaddr == overlapStart) {
if (e->size == overlapSize) {
putEntry = 0;
}
else {
e->aoffs += size;
e->vaddr += size;
e->size -= size;
e->lmaxgap += size;
e->aoffs += overlapSize;
e->offs = (e->offs == -1) ? -1 : (e->offs + overlapSize);
e->vaddr += overlapSize;
e->size -= overlapSize;
e->lmaxgap += overlapSize;

s = lib_treeof(map_entry_t, linkage, lib_rbPrev(&e->linkage));
if (s != NULL) {
s->rmaxgap += size;
s->rmaxgap += overlapSize;
map_augment(&s->linkage);
}

map_augment(&e->linkage);
}
}
else if ((e->vaddr + e->size) == (vaddr + size)) {
e->size -= size;
e->rmaxgap += size;
else if ((ptr_t)(e->vaddr + e->size) == overlapEnd) {
e->size -= overlapSize;
e->rmaxgap += overlapSize;

s = lib_treeof(map_entry_t, linkage, lib_rbNext(&e->linkage));
if (s != NULL) {
s->lmaxgap += size;
s->lmaxgap += overlapSize;
map_augment(&s->linkage);
}

map_augment(&e->linkage);
}
else {
/* This case is only possible if the unmapped region is in the middle of a single entry,
* so there is no possibility of partially unmapping.
* Allocate memory for new entry starting from overlapEnd.
*/
s = map_alloc();
/* This case is only possible if the unmapped region is in the middle of a single entry,
* so there is no possibility of partially unmapping. */
if (s == NULL) {
return -ENOMEM;
}

s->flags = e->flags;
s->prot = e->prot;
s->protOrig = e->protOrig;
s->object = vm_objectRef(e->object);
s->offs = (e->offs == -1) ? -1 : (e->offs + ((vaddr + size) - e->vaddr));
s->vaddr = vaddr + size;
s->size = (size_t)((e->vaddr + e->size) - s->vaddr);
s->aoffs = e->aoffs + ((vaddr + size) - e->vaddr);
vm_mapEntryCopy(s, e, 0);

s->amap = amap_ref(e->amap);
s->offs = (e->offs == -1) ? -1 : (e->offs + overlapEOffset + overlapSize);
s->vaddr = (void *)overlapEnd;
s->size -= overlapEOffset + overlapSize;
s->aoffs += overlapEOffset + overlapSize;
s->lmaxgap = overlapSize;

e->size = (size_t)(vaddr - e->vaddr);
e->rmaxgap = size;
e->size = (size_t)(overlapStart - (ptr_t)e->vaddr);
e->rmaxgap = overlapSize;

map_augment(&e->linkage);
_map_add(proc, map, s);
}

/* Perform amap and pmap changes only when we are sure we have enough space to perform corresponding map changes. */

/* Note: what if NEEDS_COPY? */
amap_putanons(e->amap, e->aoffs + (int)overlapEOffset, overlapSize);

for (pvaddr = overlapStart; pvaddr < overlapEnd; pvaddr += SIZE_PAGE) {
pmap_remove(&map->pmap, (void *)pvaddr);
}

if (putEntry != 0) {
_entry_put(map, e);
}
}

return EOK;
Expand Down Expand Up @@ -747,22 +774,15 @@ int vm_munmap(vm_map_t *map, void *vaddr, size_t size)
}


static void vm_mapEntryCopy(map_entry_t *dst, map_entry_t *src)
{
hal_memcpy(dst, src, sizeof(map_entry_t));
dst->amap = amap_ref(src->amap);
amap_getanons(dst->amap, dst->aoffs, dst->size);
dst->object = vm_objectRef(src->object);
}


static void vm_mapEntrySplit(process_t *p, vm_map_t *m, map_entry_t *e, map_entry_t *new, size_t len)
{
vm_mapEntryCopy(new, e);
vm_mapEntryCopy(new, e, 0);

new->vaddr += len;
new->size -= len;
new->aoffs += len;
new->offs = (new->offs == -1) ? -1 : (new->aoffs + len);
new->lmaxgap = 0;

e->size = len;
e->rmaxgap = 0;
Expand Down Expand Up @@ -1028,7 +1048,8 @@ int vm_mapCopy(process_t *proc, vm_map_t *dst, vm_map_t *src)
return -ENOMEM;
}

vm_mapEntryCopy(f, e);

vm_mapEntryCopy(f, e, 1);
_map_add(proc, dst, f);

if ((e->prot & PROT_WRITE) && !(e->flags & MAP_DEVICE)) {
Expand Down

0 comments on commit f9c1ba8

Please sign in to comment.