commit 00a771f5a8025404926f049a5ee23b1eb836581f
Author: Sérgio M. Basto <sergio@serjux.com>
Date: Fri Dec 4 13:32:33 2020 +0000
Add fixes for kernel 5.10

For kernels 5.10 and later the alloc_vm_area() path for executable
ring-0 memory is no longer used: the gate becomes
RTLNX_VER_RANGE(3,2,0, 5,10,0), and a new
IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC path maps executable allocations
with vmap() with the NX bit set, then has rtR0MemObjNativeProtect()
clear NX via apply_to_page_range() once the memory is populated, so the
pages are never writable and executable at the same time (W^X). The
same change is applied to both the vboxdrv and vboxguest copies of
memobj-r0drv-linux.c.
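For reference, a minimal, self-contained sketch of that pattern. This is
not the patch itself: all names here (wx_args, wx_apply_pte, wx_make_exec)
are hypothetical, and the real code lives in rtR0MemObjLinuxVMap() and
rtR0MemObjNativeProtect() in the hunks below.

    /*
     * Illustrative sketch only (assumes kernel >= 5.10): map pages
     * non-executable first, populate them, then flip the protection
     * per-PTE with apply_to_page_range() so the area is never W+X.
     */
    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/vmalloc.h>

    struct wx_args {
        struct page **pages;    /* backing pages of the vmap()ed area     */
        unsigned long base;     /* start address of the mapping           */
        pgprot_t      prot;     /* new protection, e.g. PAGE_KERNEL_EXEC  */
    };

    /* pte_fn_t callback: rewrite one PTE with the requested protection. */
    static int wx_apply_pte(pte_t *pte, unsigned long addr, void *data)
    {
        struct wx_args *args = data;
        unsigned long   idx  = (addr - args->base) >> PAGE_SHIFT;

        set_pte(pte, mk_pte(args->pages[idx], args->prot));
        return 0;
    }

    /* After vmap(pages, npages, VM_MAP, PAGE_KERNEL) and filling the
     * memory, make it executable (and no longer writable):            */
    static int wx_make_exec(void *addr, struct page **pages, size_t npages)
    {
        struct wx_args args = {
            .pages = pages,
            .base  = (unsigned long)addr,
            .prot  = PAGE_KERNEL_EXEC,
        };
        return apply_to_page_range(current->active_mm, (unsigned long)addr,
                                   npages << PAGE_SHIFT, wx_apply_pte, &args);
    }

The real callback below does the same, recovering the page index from the
memory object and translating the Linux return code to an IPRT status.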
VirtualBox-kmod.spec | 7 +-
fixes_4.10.patch | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 194 insertions(+), 1 deletion(-)
---
diff --git a/VirtualBox-kmod.spec b/VirtualBox-kmod.spec
index cf95719..32b612f 100644
--- a/VirtualBox-kmod.spec
+++ b/VirtualBox-kmod.spec
@@ -37,7 +37,7 @@
Name: VirtualBox-kmod
Version: 6.1.16
-Release: 1%{?dist}
+Release: 2%{?dist}
#Release: 1%%{?prerel:.%%{prerel}}%%{?dist}
Summary: Kernel module for VirtualBox
@@ -45,6 +45,7 @@ License: GPLv2 or CDDL
URL: http://www.virtualbox.org/wiki/VirtualBox
# This filters out the XEN kernel, since we don't run on XEN
Source1: excludekernel-filter.txt
+Patch1: fixes_4.10.patch
%global AkmodsBuildRequires %{_bindir}/kmodtool, VirtualBox-kmodsrc >= %{version}%{vboxreltag}, xz, time, elfutils-libelf-devel, gcc
@@ -67,6 +68,7 @@ Kernel module for VirtualBox
%prep
%setup -T -c
tar --use-compress-program xz -xf %{_datadir}/%{name}-%{version}/%{name}-%{version}.tar.xz
+%patch1 -p0
pushd %{name}-%{version}
popd
@@ -131,6 +133,9 @@ DIRS=$(ls %{name}-%{version} |wc -l)
%changelog
+* Fri Dec 04 2020 Sérgio Basto <sergio@serjux.com> - 6.1.16-2
+- Add fixes for kernel 5.10
+
* Wed Oct 21 2020 Sérgio Basto <sergio@serjux.com> - 6.1.16-1
- Update to 6.1.16
diff --git a/fixes_4.10.patch b/fixes_4.10.patch
new file mode 100644
index 0000000..0a6b691
--- /dev/null
+++ b/fixes_4.10.patch
@@ -0,0 +1,188 @@
+diff -rup VirtualBox-kmod-6.1.16/vboxdrv/r0drv/linux/memobj-r0drv-linux.c VirtualBox-kmod-6.1.16.new/vboxdrv/r0drv/linux/memobj-r0drv-linux.c
+--- VirtualBox-kmod-6.1.16/vboxdrv/r0drv/linux/memobj-r0drv-linux.c 2020-10-16 17:38:11.000000000 +0100
++++ VirtualBox-kmod-6.1.16.new/vboxdrv/r0drv/linux/memobj-r0drv-linux.c 2020-12-04 13:05:02.345818731 +0000
+@@ -56,9 +56,12 @@
+ * Whether we use alloc_vm_area (3.2+) for executable memory.
+ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
+ * better W^R compliance (fExecutable flag). */
+-#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
++#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
+ # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ #endif
++#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
++# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
++#endif
+
+ /*
+ * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
+@@ -502,6 +505,42 @@ static void rtR0MemObjLinuxFreePages(PRT
+ }
+
+
++#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
++/**
++ * User data passed to the apply_to_page_range() callback.
++ */
++typedef struct LNXAPPLYPGRANGE
++{
++ /** Pointer to the memory object. */
++ PRTR0MEMOBJLNX pMemLnx;
++ /** The page protection flags to apply. */
++ pgprot_t fPg;
++} LNXAPPLYPGRANGE;
++/** Pointer to the user data. */
++typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
++/** Pointer to the const user data. */
++typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
++
++/**
++ * Callback called in apply_to_page_range().
++ *
++ * @returns Linux status code.
++ * @param pPte Pointer to the page table entry for the given address.
++ * @param uAddr The address to apply the new protection to.
++ * @param pvUser The opaque user data.
++ */
++static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
++{
++ PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
++ PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
++ uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
++
++ set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
++ return 0;
++}
++#endif
++
++
+ /**
+ * Maps the allocation into ring-0.
+ *
+@@ -584,6 +623,11 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
+ else
+ # endif
+ {
++# if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
++ if (fExecutable)
++ pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
++# endif
++
+ # ifdef VM_MAP
+ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+ # else
+@@ -1851,6 +1895,21 @@ DECLHIDDEN(int) rtR0MemObjNativeProtect(
+ preempt_enable();
+ return VINF_SUCCESS;
+ }
++# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
++ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
++ if ( pMemLnx->fExecutable
++ && pMemLnx->fMappedToRing0)
++ {
++ LNXAPPLYPGRANGE Args;
++ Args.pMemLnx = pMemLnx;
++ Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
++ int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
++ rtR0MemObjLinuxApplyPageRange, (void *)&Args);
++ if (rcLnx)
++ return VERR_NOT_SUPPORTED;
++
++ return VINF_SUCCESS;
++ }
+ # endif
+
+ NOREF(pMem);
+diff -rup VirtualBox-kmod-6.1.16/vboxguest/r0drv/linux/memobj-r0drv-linux.c VirtualBox-kmod-6.1.16.new/vboxguest/r0drv/linux/memobj-r0drv-linux.c
+--- VirtualBox-kmod-6.1.16/vboxguest/r0drv/linux/memobj-r0drv-linux.c 2020-10-16 17:38:11.000000000 +0100
++++ VirtualBox-kmod-6.1.16.new/vboxguest/r0drv/linux/memobj-r0drv-linux.c 2020-12-04 13:07:32.088769617 +0000
+@@ -56,9 +56,12 @@
+ * Whether we use alloc_vm_area (3.2+) for executable memory.
+ * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
+ * better W^R compliance (fExecutable flag). */
+-#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
++#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
+ # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+ #endif
++#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
++# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
++#endif
+
+ /*
+ * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
+@@ -502,6 +505,42 @@ static void rtR0MemObjLinuxFreePages(PRT
+ }
+
+
++#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
++/**
++ * User data passed to the apply_to_page_range() callback.
++ */
++typedef struct LNXAPPLYPGRANGE
++{
++ /** Pointer to the memory object. */
++ PRTR0MEMOBJLNX pMemLnx;
++ /** The page protection flags to apply. */
++ pgprot_t fPg;
++} LNXAPPLYPGRANGE;
++/** Pointer to the user data. */
++typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
++/** Pointer to the const user data. */
++typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
++
++/**
++ * Callback called in apply_to_page_range().
++ *
++ * @returns Linux status code.
++ * @param pPte Pointer to the page table entry for the given address.
++ * @param uAddr The address to apply the new protection to.
++ * @param pvUser The opaque user data.
++ */
++static DECLCALLBACK(int) rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
++{
++ PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
++ PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
++ uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
++
++ set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
++ return 0;
++}
++#endif
++
++
+ /**
+ * Maps the allocation into ring-0.
+ *
+@@ -584,6 +623,11 @@ static int rtR0MemObjLinuxVMap(PRTR0MEMO
+ else
+ # endif
+ {
++# if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
++ if (fExecutable)
++ pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
++# endif
++
+ # ifdef VM_MAP
+ pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
+ # else
+@@ -1851,6 +1895,21 @@ DECLHIDDEN(int) rtR0MemObjNativeProtect(
+ preempt_enable();
+ return VINF_SUCCESS;
+ }
++# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
++ PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
++ if ( pMemLnx->fExecutable
++ && pMemLnx->fMappedToRing0)
++ {
++ LNXAPPLYPGRANGE Args;
++ Args.pMemLnx = pMemLnx;
++ Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
++ int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
++ rtR0MemObjLinuxApplyPageRange, (void *)&Args);
++ if (rcLnx)
++ return VERR_NOT_SUPPORTED;
++
++ return VINF_SUCCESS;
++ }
+ # endif
+
+ NOREF(pMem);