commit 8bdc1cdb75cc9f9cb104405b2dcd5e3b545c7bde
Author: Sérgio M. Basto <sergio@serjux.com>
Date: Thu Aug 6 12:35:47 2020 +0100
Updates for kernel-5.8
088da92603cb4f1175ed6d0c452b049af3372c1e.patch | 65 +++++++
3c981196de564d78aa8c653496f7fefe303bf7b6.patch | 47 +++++
521d08e75cb85b0dad89643d2a9de39dfb6f8832.patch | 160 ++++++++++++++++
6370c9d7c7908f7072b654f9794ed6c5d562768b.patch | 41 +++++
842e5679b0904a80c7f065ad50417d82af265395.patch | 27 +++
9f9e3db9d80be17d6fc9be48b6d8745c971fca99.patch | 241 +++++++++++++++++++++++++
VirtualBox.spec | 21 ++-
b0f29563e5a7e5d4af8585ee0fffe208d3f528d2.patch | 27 +++
update_vbox.sh | 2 +-
9 files changed, 628 insertions(+), 3 deletions(-)
---
diff --git a/088da92603cb4f1175ed6d0c452b049af3372c1e.patch
b/088da92603cb4f1175ed6d0c452b049af3372c1e.patch
new file mode 100644
index 0000000..3ee086b
--- /dev/null
+++ b/088da92603cb4f1175ed6d0c452b049af3372c1e.patch
@@ -0,0 +1,65 @@
+From 088da92603cb4f1175ed6d0c452b049af3372c1e Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Wed, 29 Jul 2020 10:44:46 +0000
+Subject: [PATCH] IPRT/memobj-r0drv-linux.c: Make it easier to push the W^X
+ semantics further back (after some testing). bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85514 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ .../VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+index e42fe255b6..4edf533d45 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
++++ b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+@@ -52,6 +52,13 @@
+ # define PAGE_READONLY_EXEC PAGE_READONLY
+ #endif
+
++/** @def IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++ * Whether we use alloc_vm_area (3.2+) for executable memory.
++ * This is a must for 5.8+, but we'll enable it for earlier kernels later. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) || defined(DOXYGEN_RUNNING)
++# define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
++#endif
++
+ /*
+ * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
+ * track_pfn_vma_new() is apparently not defined for non-RAM pages.
+@@ -105,7 +112,7 @@ typedef struct RTR0MEMOBJLNX
+ bool fExecutable;
+ /** Set if we've vmap'ed the memory into ring-0. */
+ bool fMappedToRing0;
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++#ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ /** Return from alloc_vm_area() that we now need to use for executable
+ * memory. */
+ struct vm_struct *pArea;
+@@ -544,7 +551,7 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool
fExecutable)
+ pgprot_val(fPg) |= _PAGE_NX;
+ # endif
+
+-# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ if (fExecutable)
+ {
+ pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages,
sizeof(papPtes[0]), GFP_KERNEL);
+@@ -612,7 +619,7 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool
fExecutable)
+ static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
+ {
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
+-# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ if (pMemLnx->pArea)
+ {
+ # if 0
+@@ -1828,7 +1835,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem,
RTR0MEMOBJ p
+
+ DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t
cbSub, uint32_t fProt)
+ {
+-# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++# ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ /*
+ * Currently only supported when we've got addresses PTEs from the kernel.
+ */
diff --git a/3c981196de564d78aa8c653496f7fefe303bf7b6.patch
b/3c981196de564d78aa8c653496f7fefe303bf7b6.patch
new file mode 100644
index 0000000..dbaabb4
--- /dev/null
+++ b/3c981196de564d78aa8c653496f7fefe303bf7b6.patch
@@ -0,0 +1,47 @@
+From 3c981196de564d78aa8c653496f7fefe303bf7b6 Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Thu, 23 Jul 2020 11:58:10 +0000
+Subject: [PATCH] SUPDrv/supdrvOSChangeCR4: Adjustments for 5.8. bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85431 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ .../HostDrivers/Support/linux/SUPDrv-linux.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
b/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
+index 02cea26f02..16f6e9fc9e 100644
+--- a/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
++++ b/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
+@@ -756,20 +756,25 @@ EXPORT_SYMBOL(SUPDrvLinuxIDC);
+
+ RTCCUINTREG VBOXCALL supdrvOSChangeCR4(RTCCUINTREG fOrMask, RTCCUINTREG fAndMask)
+ {
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
+- RTCCUINTREG uOld = this_cpu_read(cpu_tlbstate.cr4);
+- RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ RTCCUINTREG const uOld = __read_cr4();
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
++ RTCCUINTREG const uOld = this_cpu_read(cpu_tlbstate.cr4);
++#else
++ RTCCUINTREG const uOld = ASMGetCR4();
++#endif
++ RTCCUINTREG const uNew = (uOld & fAndMask) | fOrMask;
+ if (uNew != uOld)
+ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ ASMSetCR4(uNew);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
+ this_cpu_write(cpu_tlbstate.cr4, uNew);
+ __write_cr4(uNew);
+- }
+ #else
+- RTCCUINTREG uOld = ASMGetCR4();
+- RTCCUINTREG uNew = (uOld & fAndMask) | fOrMask;
+- if (uNew != uOld)
+ ASMSetCR4(uNew);
+ #endif
++ }
+ return uOld;
+ }
+
diff --git a/521d08e75cb85b0dad89643d2a9de39dfb6f8832.patch
b/521d08e75cb85b0dad89643d2a9de39dfb6f8832.patch
new file mode 100644
index 0000000..a1f8c04
--- /dev/null
+++ b/521d08e75cb85b0dad89643d2a9de39dfb6f8832.patch
@@ -0,0 +1,160 @@
+From 521d08e75cb85b0dad89643d2a9de39dfb6f8832 Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Thu, 23 Jul 2020 11:57:35 +0000
+Subject: [PATCH] IPRT/memobj-r0drv-linux.c: Wrap mmap_sem operations as it was
+ renamed to mmap_lock in 5.8. bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85430 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ .../Runtime/r0drv/linux/memobj-r0drv-linux.c | 49 ++++++++++++-------
+ 1 file changed, 32 insertions(+), 17 deletions(-)
+
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+index 91e77076bf..b2c9dcdea8 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
++++ b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+@@ -72,6 +72,21 @@
+ # define gfp_t unsigned
+ #endif
+
++/*
++ * Wrappers around mmap_lock/mmap_sem difference.
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_lock)
++# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_lock)
++# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_lock)
++# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_lock)
++#else
++# define LNX_MM_DOWN_READ(a_pMm) down_read(&(a_pMm)->mmap_sem)
++# define LNX_MM_UP_READ(a_pMm) up_read(&(a_pMm)->mmap_sem)
++# define LNX_MM_DOWN_WRITE(a_pMm) down_write(&(a_pMm)->mmap_sem)
++# define LNX_MM_UP_WRITE(a_pMm) up_write(&(a_pMm)->mmap_sem)
++#endif
++
+
+
/*********************************************************************************************************************************
+ * Structures and Typedefs
*
+@@ -182,7 +197,7 @@ static pgprot_t rtR0MemObjLinuxConvertProt(unsigned fProt, bool
fKernel)
+ * Worker for rtR0MemObjNativeReserveUser and rtR0MemObjNativerMapUser that creates
+ * an empty user space mapping.
+ *
+- * We acquire the mmap_sem of the task!
++ * We acquire the mmap_sem/mmap_lock of the task!
+ *
+ * @returns Pointer to the mapping.
+ * (void *)-1 on failure.
+@@ -222,9 +237,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb,
size_t uAlignm
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ ulAddr = vm_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS |
MAP_FIXED, 0);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ ulAddr = do_mmap(NULL, R3PtrFixed, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS |
MAP_FIXED, 0);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ }
+ else
+@@ -232,9 +247,9 @@ static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb,
size_t uAlignm
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+ ulAddr = vm_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ ulAddr = do_mmap(NULL, 0, cb, fLnxProt, MAP_SHARED | MAP_ANONYMOUS, 0);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ if ( !(ulAddr & ~PAGE_MASK)
+ && (ulAddr & (uAlignment - 1)))
+@@ -257,7 +272,7 @@ static void *rtR0MemObjLinuxDoMmap(RTR3PTR R3PtrFixed, size_t cb,
size_t uAlignm
+ * Worker that destroys a user space mapping.
+ * Undoes what rtR0MemObjLinuxDoMmap did.
+ *
+- * We acquire the mmap_sem of the task!
++ * We acquire the mmap_sem/mmap_lock of the task!
+ *
+ * @param pv The ring-3 mapping.
+ * @param cb The size of the mapping.
+@@ -269,13 +284,13 @@ static void rtR0MemObjLinuxDoMunmap(void *pv, size_t cb, struct
task_struct *pTa
+ Assert(pTask == current); RT_NOREF_PV(pTask);
+ vm_munmap((unsigned long)pv, cb);
+ #elif defined(USE_RHEL4_MUNMAP)
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ do_munmap(pTask->mm, (unsigned long)pv, cb, 0); /* should it be 1 or 0? */
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #else
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+ do_munmap(pTask->mm, (unsigned long)pv, cb);
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+ #endif
+ }
+
+@@ -593,7 +608,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+ size_t iPage;
+ Assert(pTask);
+ if (pTask && pTask->mm)
+- down_read(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_READ(pTask->mm);
+
+ iPage = pMemLnx->cPages;
+ while (iPage-- > 0)
+@@ -608,7 +623,7 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
+ }
+
+ if (pTask && pTask->mm)
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+ }
+ /* else: kernel memory - nothing to do here. */
+ break;
+@@ -1076,7 +1091,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL
ppMem, RTR3PTR R3P
+ papVMAs = (struct vm_area_struct **)RTMemAlloc(sizeof(*papVMAs) * cPages);
+ if (papVMAs)
+ {
+- down_read(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_READ(pTask->mm);
+
+ /*
+ * Get user pages.
+@@ -1162,7 +1177,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL
ppMem, RTR3PTR R3P
+ papVMAs[rc]->vm_flags |= VM_DONTCOPY | VM_LOCKED;
+ }
+
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+
+ RTMemFree(papVMAs);
+
+@@ -1189,7 +1204,7 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL
ppMem, RTR3PTR R3P
+ #endif
+ }
+
+- up_read(&pTask->mm->mmap_sem);
++ LNX_MM_UP_READ(pTask->mm);
+
+ RTMemFree(papVMAs);
+ rc = VERR_LOCK_FAILED;
+@@ -1604,7 +1619,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem,
RTR0MEMOBJ p
+ const size_t cPages = (offSub + cbSub) >> PAGE_SHIFT;
+ size_t iPage;
+
+- down_write(&pTask->mm->mmap_sem);
++ LNX_MM_DOWN_WRITE(pTask->mm);
+
+ rc = VINF_SUCCESS;
+ if (pMemLnxToMap->cPages)
+@@ -1721,7 +1736,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem,
RTR0MEMOBJ p
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+
+- up_write(&pTask->mm->mmap_sem);
++ LNX_MM_UP_WRITE(pTask->mm);
+
+ if (RT_SUCCESS(rc))
+ {
diff --git a/6370c9d7c7908f7072b654f9794ed6c5d562768b.patch
b/6370c9d7c7908f7072b654f9794ed6c5d562768b.patch
new file mode 100644
index 0000000..cf3510e
--- /dev/null
+++ b/6370c9d7c7908f7072b654f9794ed6c5d562768b.patch
@@ -0,0 +1,41 @@
+From 6370c9d7c7908f7072b654f9794ed6c5d562768b Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Wed, 29 Jul 2020 10:03:29 +0000
+Subject: [PATCH] IPRT/alloc-r0drv-linux.c,SUPDrv-linux.c: RTMEMALLOC_EXEC_HEAP
+ for 5.8+ (more on this later as it doesn't really work). bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85505 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c | 4 ++--
+ trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
b/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
+index 16f6e9fc9e..efe873a184 100644
+--- a/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
++++ b/trunk/src/VBox/HostDrivers/Support/linux/SUPDrv-linux.c
+@@ -144,9 +144,9 @@ static int force_async_tsc = 0;
+ * Memory for the executable memory heap (in IPRT).
+ */
+ # ifdef DEBUG
+-# define EXEC_MEMORY_SIZE 8388608 /* 8 MB */
++# define EXEC_MEMORY_SIZE 10485760 /* 10 MB */
+ # else
+-# define EXEC_MEMORY_SIZE 2097152 /* 2 MB */
++# define EXEC_MEMORY_SIZE 8388608 /* 8 MB */
+ # endif
+ extern uint8_t g_abExecMemory[EXEC_MEMORY_SIZE];
+ # ifndef VBOX_WITH_TEXT_MODMEM_HACK
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
b/trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
+index dcda77d276..b182404cec 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
++++ b/trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
+@@ -38,7 +38,7 @@
+
+
+ #if (defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)) &&
!defined(RTMEMALLOC_EXEC_HEAP)
+-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
++# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && LINUX_VERSION_CODE
< KERNEL_VERSION(5, 8, 0)
+ /**
+ * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
+ * memory in the moduel range. This is preferrable to the exec heap below.
diff --git a/842e5679b0904a80c7f065ad50417d82af265395.patch
b/842e5679b0904a80c7f065ad50417d82af265395.patch
new file mode 100644
index 0000000..e5cf91b
--- /dev/null
+++ b/842e5679b0904a80c7f065ad50417d82af265395.patch
@@ -0,0 +1,27 @@
+From 842e5679b0904a80c7f065ad50417d82af265395 Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Wed, 29 Jul 2020 11:01:45 +0000
+Subject: [PATCH] IPRT/the-linux-kernel.h: Need header for __flush_tlb_all()
+ now. bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85518 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
b/trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
+index 9b77283e1c..0142035c41 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
++++ b/trunk/src/VBox/Runtime/r0drv/linux/the-linux-kernel.h
+@@ -176,6 +176,11 @@
+ # include <asm/set_memory.h>
+ #endif
+
++/* for __flush_tlb_all() */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) && (defined(RT_ARCH_AMD64)
|| defined(RT_ARCH_X86))
++# include <asm/tlbflush.h>
++#endif
++
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ # include <asm/smap.h>
+ #else
diff --git a/9f9e3db9d80be17d6fc9be48b6d8745c971fca99.patch
b/9f9e3db9d80be17d6fc9be48b6d8745c971fca99.patch
new file mode 100644
index 0000000..2c82f93
--- /dev/null
+++ b/9f9e3db9d80be17d6fc9be48b6d8745c971fca99.patch
@@ -0,0 +1,241 @@
+From 9f9e3db9d80be17d6fc9be48b6d8745c971fca99 Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Wed, 29 Jul 2020 10:02:13 +0000
+Subject: [PATCH] IPRT/memobj-r0drv*: Change the fExecutable flag to W^X
+ semantics where possible (linux 5.8+ only atm). Linux 5.8 adjustments.
+ bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85504 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ trunk/include/iprt/memobj.h | 30 ++++--
+ .../Runtime/r0drv/linux/memobj-r0drv-linux.c | 99 +++++++++++++++++--
+ 2 files changed, 115 insertions(+), 14 deletions(-)
+
+diff --git a/trunk/include/iprt/memobj.h b/trunk/include/iprt/memobj.h
+index 2510d8be52..4925315db6 100644
+--- a/trunk/include/iprt/memobj.h
++++ b/trunk/include/iprt/memobj.h
+@@ -127,7 +127,10 @@ RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool
fFreeMappings);
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocPage(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocPageTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -140,7 +143,10 @@ RTR0DECL(int) RTR0MemObjFree(RTR0MEMOBJ MemObj, bool
fFreeMappings);
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable,
const char *pszTag);
+@@ -154,7 +160,10 @@ RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb,
bool fExecu
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocLow(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocLowTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -168,7 +177,10 @@ RTR0DECL(int) RTR0MemObjAllocPageTag(PRTR0MEMOBJ pMemObj, size_t cb,
bool fExecu
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable,
const char *pszTag);
+@@ -182,7 +194,10 @@ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb,
bool fExecut
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ */
+ #define RTR0MemObjAllocCont(pMemObj, cb, fExecutable) \
+ RTR0MemObjAllocContTag((pMemObj), (cb), (fExecutable), RTMEM_TAG)
+@@ -196,7 +211,10 @@ RTR0DECL(int) RTR0MemObjAllocLowTag(PRTR0MEMOBJ pMemObj, size_t cb,
bool fExecut
+ * @returns IPRT status code.
+ * @param pMemObj Where to store the ring-0 memory object handle.
+ * @param cb Number of bytes to allocate. This is rounded up to nearest
page.
+- * @param fExecutable Flag indicating whether it should be permitted to executed
code in the memory object.
++ * @param fExecutable Flag indicating whether it should be permitted to
++ * executed code in the memory object. The user must
++ * use RTR0MemObjProtect after initialization the
++ * allocation to actually make it executable.
+ * @param pszTag Allocation tag used for statistics and such.
+ */
+ RTR0DECL(int) RTR0MemObjAllocContTag(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable,
const char *pszTag);
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+index b2c9dcdea8..e42fe255b6 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
++++ b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+@@ -92,7 +92,7 @@
+ * Structures and Typedefs
*
+
*********************************************************************************************************************************/
+ /**
+- * The Darwin version of the memory object structure.
++ * The Linux version of the memory object structure.
+ */
+ typedef struct RTR0MEMOBJLNX
+ {
+@@ -105,11 +105,20 @@ typedef struct RTR0MEMOBJLNX
+ bool fExecutable;
+ /** Set if we've vmap'ed the memory into ring-0. */
+ bool fMappedToRing0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ /** Return from alloc_vm_area() that we now need to use for executable
++ * memory. */
++ struct vm_struct *pArea;
++ /** PTE array that goes along with pArea (must be freed). */
++ pte_t **papPtesForArea;
++#endif
+ /** The pages in the apPages array. */
+ size_t cPages;
+ /** Array of struct page pointers. (variable size) */
+ struct page *apPages[1];
+-} RTR0MEMOBJLNX, *PRTR0MEMOBJLNX;
++} RTR0MEMOBJLNX;
++/** Pointer to the linux memory object. */
++typedef RTR0MEMOBJLNX *PRTR0MEMOBJLNX;
+
+
+ static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx);
+@@ -535,15 +544,49 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool
fExecutable)
+ pgprot_val(fPg) |= _PAGE_NX;
+ # endif
+
++# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ if (fExecutable)
++ {
++ pte_t **papPtes = (pte_t **)kmalloc_array(pMemLnx->cPages,
sizeof(papPtes[0]), GFP_KERNEL);
++ if (papPtes)
++ {
++ pMemLnx->pArea = alloc_vm_area(pMemLnx->Core.cb, papPtes); /*
Note! pArea->nr_pages is not set. */
++ if (pMemLnx->pArea)
++ {
++ size_t i;
++ Assert(pMemLnx->pArea->size >= pMemLnx->Core.cb); /*
Note! includes guard page. */
++ Assert(pMemLnx->pArea->addr);
++# ifdef _PAGE_NX
++ pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX
when memory ready, W^X fashion. */
++# endif
++ pMemLnx->papPtesForArea = papPtes;
++ for (i = 0; i < pMemLnx->cPages; i++)
++ *papPtes[i] = mk_pte(pMemLnx->apPages[i], fPg);
++ pMemLnx->Core.pv = pMemLnx->pArea->addr;
++ pMemLnx->fMappedToRing0 = true;
++ }
++ else
++ {
++ kfree(papPtes);
++ rc = VERR_MAP_FAILED;
++ }
++ }
++ else
++ rc = VERR_MAP_FAILED;
++ }
++ else
++# endif
++ {
+ # ifdef VM_MAP
+- pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages,
VM_MAP, fPg);
++ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages,
VM_MAP, fPg);
+ # else
+- pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages,
VM_ALLOC, fPg);
++ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages,
VM_ALLOC, fPg);
+ # endif
+- if (pMemLnx->Core.pv)
+- pMemLnx->fMappedToRing0 = true;
+- else
+- rc = VERR_MAP_FAILED;
++ if (pMemLnx->Core.pv)
++ pMemLnx->fMappedToRing0 = true;
++ else
++ rc = VERR_MAP_FAILED;
++ }
+ #else /* < 2.4.22 */
+ rc = VERR_NOT_SUPPORTED;
+ #endif
+@@ -569,6 +612,22 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMOBJLNX pMemLnx, bool
fExecutable)
+ static void rtR0MemObjLinuxVUnmap(PRTR0MEMOBJLNX pMemLnx)
+ {
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
++# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ if (pMemLnx->pArea)
++ {
++# if 0
++ pte_t **papPtes = pMemLnx->papPtesForArea;
++ size_t i;
++ for (i = 0; i < pMemLnx->cPages; i++)
++ *papPtes[i] = 0;
++# endif
++ free_vm_area(pMemLnx->pArea);
++ kfree(pMemLnx->papPtesForArea);
++ pMemLnx->pArea = NULL;
++ pMemLnx->papPtesForArea = NULL;
++ }
++ else
++# endif
+ if (pMemLnx->fMappedToRing0)
+ {
+ Assert(pMemLnx->Core.pv);
+@@ -1437,6 +1496,7 @@ DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL
ppMem, RTR0MEMOBJ
+ * Use vmap - 2.4.22 and later.
+ */
+ pgprot_t fPg = rtR0MemObjLinuxConvertProt(fProt, true /* kernel */);
++ /** @todo We don't really care too much for EXEC here... 5.8 always adds
NX. */
+ Assert(((offSub + cbSub) >> PAGE_SHIFT) <=
pMemLnxToMap->cPages);
+ # ifdef VM_MAP
+ pMemLnx->Core.pv = vmap(&pMemLnxToMap->apPages[offSub >>
PAGE_SHIFT], cbSub >> PAGE_SHIFT, VM_MAP, fPg);
+@@ -1768,6 +1828,29 @@ DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL
ppMem, RTR0MEMOBJ p
+
+ DECLHIDDEN(int) rtR0MemObjNativeProtect(PRTR0MEMOBJINTERNAL pMem, size_t offSub, size_t
cbSub, uint32_t fProt)
+ {
++# if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
++ /*
++ * Currently only supported when we've got addresses PTEs from the kernel.
++ */
++ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
++ if (pMemLnx->pArea && pMemLnx->papPtesForArea)
++ {
++ pgprot_t const fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
++ size_t const cPages = (offSub + cbSub) >> PAGE_SHIFT;
++ pte_t **papPtes = pMemLnx->papPtesForArea;
++ size_t i;
++
++ for (i = offSub >> PAGE_SHIFT; i < cPages; i++)
++ {
++ set_pte(papPtes[i], mk_pte(pMemLnx->apPages[i], fPg));
++ }
++ preempt_disable();
++ __flush_tlb_all();
++ preempt_enable();
++ return VINF_SUCCESS;
++ }
++# endif
++
+ NOREF(pMem);
+ NOREF(offSub);
+ NOREF(cbSub);
diff --git a/VirtualBox.spec b/VirtualBox.spec
index 3022801..cb7cde1 100644
--- a/VirtualBox.spec
+++ b/VirtualBox.spec
@@ -46,7 +46,7 @@
Name: VirtualBox
Version: 6.1.12
-Release: 1%{?dist}
+Release: 2%{?dist}
Summary: A general-purpose full virtualizer for PC hardware
License: GPLv2 or (GPLv2 and CDDL)
@@ -95,6 +95,13 @@ Patch70: vbox-python-detection.diff
Patch80: VirtualBox-6.1.4-gcc10.patch
Patch86: VirtualBox-6.1.0-VBoxRem.patch
+Patch87: 3c981196de564d78aa8c653496f7fefe303bf7b6.patch
+Patch88: 521d08e75cb85b0dad89643d2a9de39dfb6f8832.patch
+Patch89: 9f9e3db9d80be17d6fc9be48b6d8745c971fca99.patch
+Patch90: 6370c9d7c7908f7072b654f9794ed6c5d562768b.patch
+Patch91: 088da92603cb4f1175ed6d0c452b049af3372c1e.patch
+Patch92: b0f29563e5a7e5d4af8585ee0fffe208d3f528d2.patch
+Patch93: 842e5679b0904a80c7f065ad50417d82af265395.patch
BuildRequires: kBuild >= 0.1.9998.r3093
@@ -344,7 +351,14 @@ rm -r src/libs/zlib-1.2.*/
%patch61 -p1 -b .automount
%patch70 -p1 -b .python-detection
%patch80 -p1 -b .gcc10
-%patch86 -p1 -b .vboxrem
+#patch86 -p1 -b .vboxrem
+%patch87 -p2 -b .kernel-5.8
+%patch88 -p2 -b .kernel-5.8
+%patch89 -p2 -b .kernel-5.8
+%patch90 -p2 -b .kernel-5.8
+%patch91 -p2 -b .kernel-5.8
+%patch92 -p2 -b .kernel-5.8
+%patch93 -p2 -b .kernel-5.8
%build
@@ -899,6 +913,9 @@ getent passwd vboxadd >/dev/null || \
%{_datadir}/%{name}-kmod-%{version}
%changelog
+* Wed Aug 05 2020 Sérgio Basto <sergio@serjux.com> - 6.1.12-2
+- Updates for kernel-5.8
+
* Thu Jul 16 2020 Sérgio Basto <sergio@serjux.com> - 6.1.12-1
- Update VBox to 6.1.12
- From Debian disable cloud_net "Fix build failure due to missing upstream file"
diff --git a/b0f29563e5a7e5d4af8585ee0fffe208d3f528d2.patch
b/b0f29563e5a7e5d4af8585ee0fffe208d3f528d2.patch
new file mode 100644
index 0000000..8384188
--- /dev/null
+++ b/b0f29563e5a7e5d4af8585ee0fffe208d3f528d2.patch
@@ -0,0 +1,27 @@
+From b0f29563e5a7e5d4af8585ee0fffe208d3f528d2 Mon Sep 17 00:00:00 2001
+From: vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>
+Date: Wed, 29 Jul 2020 10:47:38 +0000
+Subject: [PATCH] IPRT/memobj-r0drv-linux.c: Enable
+ IPRT_USE_ALLOC_VM_AREA_FOR_EXEC for linux 3.2 and later. bugref:9801
+
+git-svn-id: http://www.virtualbox.org/svn/vbox@85516 cfe28804-0f27-0410-a406-dd0f0b0b656f
+---
+ trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+index 4edf533d45..f2a6f829d6 100644
+--- a/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
++++ b/trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+@@ -54,8 +54,9 @@
+
+ /** @def IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ * Whether we use alloc_vm_area (3.2+) for executable memory.
+- * This is a must for 5.8+, but we'll enable it for earlier kernels later. */
+-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0) || defined(DOXYGEN_RUNNING)
++ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
++ * better W^R compliance (fExecutable flag). */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) || defined(DOXYGEN_RUNNING)
+ # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ #endif
+
diff --git a/update_vbox.sh b/update_vbox.sh
index 4784e6f..5e18fe0 100755
--- a/update_vbox.sh
+++ b/update_vbox.sh
@@ -1,5 +1,5 @@
VERSION=6.1.12
-REL=1
+REL=2
RAWHIDE=33
REPOS="f32 f31 el8 el7"
if [ -z "$1" ]