diff --git a/sortix/include/sortix/kernel/memorymanagement.h b/sortix/include/sortix/kernel/memorymanagement.h
index 9a360c24..8bd0e232 100644
--- a/sortix/include/sortix/kernel/memorymanagement.h
+++ b/sortix/include/sortix/kernel/memorymanagement.h
@@ -30,6 +30,8 @@ typedef struct multiboot_info multiboot_info_t;
 
 namespace Sortix
 {
+	class Process;
+
 	namespace Page
 	{
 		bool Reserve(size_t* counter, size_t amount);
@@ -84,6 +86,9 @@ namespace Sortix
 		size_t GetKernelStackSize();
 		void GetKernelVirtualArea(addr_t* from, size_t* size);
 		void GetUserVirtualArea(uintptr_t* from, size_t* size);
+		void UnmapMemory(Process* process, uintptr_t addr, size_t size);
+		bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot);
+		bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot);
 	}
 }
 
diff --git a/sortix/memorymanagement.cpp b/sortix/memorymanagement.cpp
index 54a46ad0..48357038 100644
--- a/sortix/memorymanagement.cpp
+++ b/sortix/memorymanagement.cpp
@@ -22,8 +22,21 @@
 
 *******************************************************************************/
 
-#include <sortix/kernel/platform.h>
+#include <sys/types.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sortix/mman.h>
+
+#include <sortix/kernel/platform.h>
 #include <sortix/kernel/memorymanagement.h>
+#include <sortix/kernel/panic.h>
+#include <sortix/kernel/process.h>
+#include <sortix/kernel/segment.h>
 #include <sortix/kernel/syscall.h>
 
 namespace Sortix {
@@ -42,6 +55,213 @@ static int sys_memstat(size_t* memused, size_t* memtotal)
 	return 0;
 }
 
+void UnmapMemory(Process* process, uintptr_t addr, size_t size)
+{
+	// process->segment_lock is held.
+	assert(Page::IsAligned(addr));
+	assert(Page::IsAligned(size));
+	assert(process == CurrentProcess());
+
+	struct segment unmap_segment;
+	unmap_segment.addr = addr;
+	unmap_segment.size = size;
+	unmap_segment.prot = 0;
+	while ( struct segment* conflict = FindOverlappingSegment(process,
+	                                                          &unmap_segment) )
+	{
+		// Delete the segment if covered entirely by our request.
+		if ( addr <= conflict->addr &&
+		     conflict->addr + conflict->size - addr <= size )
+		{
+			uintptr_t conflict_offset =
+				(uintptr_t) conflict - (uintptr_t) process->segments;
+			size_t conflict_index = conflict_offset / sizeof(struct segment);
+			Memory::UnmapRange(conflict->addr, conflict->size);
+			Memory::Flush();
+			if ( conflict_index + 1 == process->segments_used )
+			{
+				process->segments_used--;
+				continue;
+			}
+			process->segments[conflict_index] =
+				process->segments[--process->segments_used];
+			qsort(process->segments, process->segments_used,
+			      sizeof(struct segment), segmentcmp);
+			continue;
+		}
+
+		// Delete the middle of the segment if covered there by our request.
+		if ( conflict->addr < addr &&
+		     addr + size - conflict->addr <= conflict->size )
+		{
+			Memory::UnmapRange(addr, size);
+			Memory::Flush();
+			struct segment right_segment;
+			right_segment.addr = addr + size;
+			right_segment.size =
+				conflict->addr + conflict->size - (addr + size);
+			right_segment.prot = conflict->prot;
+			conflict->size = addr - conflict->addr;
+			// TODO: This shouldn't really fail as we free memory above, but
+			// this code isn't really provably reliable.
+			if ( !AddSegment(process, &right_segment) )
+				PanicF("Unexpectedly unable to split memory mapped segment");
+			continue;
+		}
+
+		// Delete the part of the segment covered partially from the left.
+		if ( addr <= conflict->addr )
+		{
+			Memory::UnmapRange(conflict->addr, addr + size - conflict->addr);
+			Memory::Flush();
+			conflict->size = conflict->addr + conflict->size - (addr + size);
+			conflict->addr = addr + size;
+			continue;
+		}
+
+		// Delete the part of the segment covered partially from the right.
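+		// (The conflict here begins before addr and ends inside the request,
+		// so only its tail [addr, conflict->addr + conflict->size) is
+		// unmapped and the segment keeps its leading part.)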
+		if ( addr < conflict->addr + conflict->size )
+		{
+			Memory::UnmapRange(addr, conflict->addr + conflict->size - addr);
+			Memory::Flush();
+			conflict->size = addr - conflict->addr;
+			continue;
+		}
+	}
+}
+
+bool ProtectMemory(Process* process, uintptr_t addr, size_t size, int prot)
+{
+	// process->segment_lock is held.
+	assert(Page::IsAligned(addr));
+	assert(Page::IsAligned(size));
+	assert(process == CurrentProcess());
+
+	// First split the segments overlapping with [addr, addr + size) into
+	// smaller segments that don't cross addr or addr + size, while
+	// verifying that there are no gaps in that region. This is where the
+	// operation can fail, as the AddSegment call can run out of memory.
+	// There is no harm in having split the segments into smaller chunks.
+	for ( size_t offset = 0; offset < size; )
+	{
+		struct segment search_region;
+		search_region.addr = addr + offset;
+		search_region.size = Page::Size();
+		search_region.prot = prot;
+		struct segment* segment =
+			FindOverlappingSegment(process, &search_region);
+
+		if ( !segment )
+			return errno = EINVAL, false;
+
+		// Split the segment into two if it begins before our search region.
+		if ( segment->addr < search_region.addr )
+		{
+			struct segment new_segment;
+			new_segment.addr = search_region.addr;
+			new_segment.size = segment->addr + segment->size - new_segment.addr;
+			new_segment.prot = segment->prot;
+			segment->size = search_region.addr - segment->addr;
+
+			if ( !AddSegment(process, &new_segment) )
+			{
+				segment->size += new_segment.size;
+				return false;
+			}
+
+			continue;
+		}
+
+		// Split the segment into two if it ends after addr + size.
+		if ( size < segment->addr + segment->size - addr )
+		{
+			struct segment new_segment;
+			new_segment.addr = addr + size;
+			new_segment.size = segment->addr + segment->size - new_segment.addr;
+			new_segment.prot = segment->prot;
+			segment->size = addr + size - segment->addr;
+
+			if ( !AddSegment(process, &new_segment) )
+			{
+				segment->size += new_segment.size;
+				return false;
+			}
+
+			continue;
+		}
+
+		offset += segment->size;
+	}
+
+	// Run through all the segments in the region [addr, addr + size),
+	// change their permissions, and update the protection of the
+	// virtual memory itself.
+	for ( size_t offset = 0; offset < size; )
+	{
+		struct segment search_region;
+		search_region.addr = addr + offset;
+		search_region.size = Page::Size();
+		search_region.prot = prot;
+		struct segment* segment =
+			FindOverlappingSegment(process, &search_region);
+		assert(segment);
+
+		if ( segment->prot != prot )
+		{
+			// TODO: There is a moment of inconsistency here when the segment
+			// table itself has another protection written than what applies
+			// to the actual pages.
+			segment->prot = prot;
+			for ( size_t i = 0; i < segment->size; i += Page::Size() )
+				Memory::PageProtect(segment->addr + i, prot);
+		}
+
+		offset += segment->size;
+	}
+
+	return true;
+}
+
+bool MapMemory(Process* process, uintptr_t addr, size_t size, int prot)
+{
+	// process->segment_lock is held.
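+	// The semantics are mmap-like: whatever is currently mapped in the
+	// range [addr, addr + size) is discarded, then replaced with a single
+	// fresh zeroed segment using the requested protection.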
+	assert(Page::IsAligned(addr));
+	assert(Page::IsAligned(size));
+	assert(process == CurrentProcess());
+
+	UnmapMemory(process, addr, size);
+
+	struct segment new_segment;
+	new_segment.addr = addr;
+	new_segment.size = size;
+	new_segment.prot = prot;
+
+	if ( !Memory::MapRange(new_segment.addr, new_segment.size, new_segment.prot) )
+		return false;
+	Memory::Flush();
+
+	if ( !AddSegment(process, &new_segment) )
+	{
+		Memory::UnmapRange(new_segment.addr, new_segment.size);
+		Memory::Flush();
+		return false;
+	}
+
+	// We have process->segment_lock locked, so we know that the memory in
+	// user space exists and we can safely zero it here.
+	// TODO: Another thread is able to see the old contents of the memory
+	// before we zero it, causing potential information leaks.
+	memset((void*) new_segment.addr, 0, new_segment.size);
+
+	return true;
+}
+
 void InitCPU(multiboot_info_t* bootinfo);
 
 void Init(multiboot_info_t* bootinfo)
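
All three functions assume the caller already holds process->segment_lock. As a
rough illustration, a munmap-style system call could drive UnmapMemory as
sketched below. This is hypothetical code, not part of the commit above:
ScopedLock, Page::AlignUp and the wrapper name are assumptions, not names taken
from this diff.

	// Hypothetical caller, for illustration only.
	static int sys_munmap_sketch(void* addr_ptr, size_t size)
	{
		uintptr_t addr = (uintptr_t) addr_ptr;

		// munmap(2) requires addr to be page aligned; round the size up to
		// whole pages before handing the range to UnmapMemory.
		if ( !Page::IsAligned(addr) || !size )
			return errno = EINVAL, -1;
		size = Page::AlignUp(size);

		Process* process = CurrentProcess();
		ScopedLock lock(&process->segment_lock); // assumed RAII lock helper
		Memory::UnmapMemory(process, addr, size);

		return 0;
	}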