summaryrefslogtreecommitdiff
path: root/arch/um/os-Linux
diff options
context:
space:
mode:
authorBenjamin Berg <benjamin.berg@intel.com>2024-07-03 15:45:35 +0200
committerJohannes Berg <johannes.berg@intel.com>2024-07-03 17:09:50 +0200
commit573a446fc8ea4ca9be60b1db2091297da48d0a0d (patch)
treee2354f08dc0a3372a49211304d2ce1a199e4b37b /arch/um/os-Linux
parentef714f15027ca6f72e90d9a198c72e93b855e2a8 (diff)
um: simplify and consolidate TLB updates
The HVC update was mostly used to compress consecutive calls into one. This is mostly relevant for userspace where it is already handled by the syscall stub code. Simplify the whole logic and consolidate it for both kernel and userspace. This does remove the sequential syscall compression for the kernel, however that shouldn't be the main factor in most runs. Signed-off-by: Benjamin Berg <benjamin.berg@intel.com> Link: https://patch.msgid.link/20240703134536.1161108-12-benjamin@sipsolutions.net Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'arch/um/os-Linux')
-rw-r--r--arch/um/os-Linux/skas/mem.c18
1 file changed, 12 insertions, 6 deletions
diff --git a/arch/um/os-Linux/skas/mem.c b/arch/um/os-Linux/skas/mem.c
index 46fa10ab9892..c55430775efd 100644
--- a/arch/um/os-Linux/skas/mem.c
+++ b/arch/um/os-Linux/skas/mem.c
@@ -175,7 +175,7 @@ static struct stub_syscall *syscall_stub_get_previous(struct mm_id *mm_idp,
return NULL;
}
-void map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, int prot,
+int map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, int prot,
int phys_fd, unsigned long long offset)
{
struct stub_syscall *sc;
@@ -185,7 +185,7 @@ void map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, int prot,
if (sc && sc->mem.prot == prot && sc->mem.fd == phys_fd &&
sc->mem.offset == MMAP_OFFSET(offset - sc->mem.length)) {
sc->mem.length += len;
- return;
+ return 0;
}
sc = syscall_stub_alloc(mm_idp);
@@ -195,9 +195,11 @@ void map(struct mm_id *mm_idp, unsigned long virt, unsigned long len, int prot,
sc->mem.prot = prot;
sc->mem.fd = phys_fd;
sc->mem.offset = MMAP_OFFSET(offset);
+
+ return 0;
}
-void unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len)
+int unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len)
{
struct stub_syscall *sc;
@@ -205,16 +207,18 @@ void unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len)
sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MUNMAP, addr);
if (sc) {
sc->mem.length += len;
- return;
+ return 0;
}
sc = syscall_stub_alloc(mm_idp);
sc->syscall = STUB_SYSCALL_MUNMAP;
sc->mem.addr = addr;
sc->mem.length = len;
+
+ return 0;
}
-void protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
+int protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
unsigned int prot)
{
struct stub_syscall *sc;
@@ -223,7 +227,7 @@ void protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MPROTECT, addr);
if (sc && sc->mem.prot == prot) {
sc->mem.length += len;
- return;
+ return 0;
}
sc = syscall_stub_alloc(mm_idp);
@@ -231,4 +235,6 @@ void protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
sc->mem.addr = addr;
sc->mem.length = len;
sc->mem.prot = prot;
+
+ return 0;
}