Diffstat (limited to 'personal/zfs/patches/linux5.8.patch')
-rw-r--r--    personal/zfs/patches/linux5.8.patch    119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/personal/zfs/patches/linux5.8.patch b/personal/zfs/patches/linux5.8.patch
new file mode 100644
index 0000000..a05cd4f
--- /dev/null
+++ b/personal/zfs/patches/linux5.8.patch
@@ -0,0 +1,119 @@
+diff -ur a/config/kernel.m4 b/config/kernel.m4
+--- a/config/kernel.m4 2020-05-05 12:56:29.709370875 -0400
++++ b/config/kernel.m4 2020-08-12 10:38:58.842672016 -0400
+@@ -45,6 +45,7 @@
+ ZFS_AC_KERNEL_SRC_SCHED
+ ZFS_AC_KERNEL_SRC_USLEEP_RANGE
+ ZFS_AC_KERNEL_SRC_KMEM_CACHE
++ ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
+ ZFS_AC_KERNEL_SRC_WAIT
+ ZFS_AC_KERNEL_SRC_INODE_TIMES
+ ZFS_AC_KERNEL_SRC_INODE_LOCK
+@@ -163,6 +164,7 @@
+ ZFS_AC_KERNEL_SCHED
+ ZFS_AC_KERNEL_USLEEP_RANGE
+ ZFS_AC_KERNEL_KMEM_CACHE
++ ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
+ ZFS_AC_KERNEL_WAIT
+ ZFS_AC_KERNEL_INODE_TIMES
+ ZFS_AC_KERNEL_INODE_LOCK
+@@ -894,3 +896,28 @@
+ [test -f build/conftest/conftest.ko],
+ [$3], [$4], [$5])
+ ])
++
++dnl #
++dnl # 5.8 API,
++dnl # __vmalloc PAGE_KERNEL removal
++dnl #
++AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
++ ZFS_LINUX_TEST_SRC([__vmalloc], [
++ #include <linux/mm.h>
++ #include <linux/vmalloc.h>
++ ],[
++ void *p __attribute__ ((unused));
++
++ p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
++ ])
++])
++
++AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
++ AC_MSG_CHECKING([whether __vmalloc(size, flags, pageflags) is available])
++ ZFS_LINUX_TEST_RESULT([__vmalloc], [
++ AC_MSG_RESULT(yes)
++ AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
++ ],[
++ AC_MSG_RESULT(no)
++ ])
++])
+diff -ur a/include/spl/sys/kmem.h b/include/spl/sys/kmem.h
+--- a/include/spl/sys/kmem.h 2020-05-05 12:56:29.717370676 -0400
++++ b/include/spl/sys/kmem.h 2020-08-12 10:41:20.253453058 -0400
+@@ -170,6 +170,15 @@
+ extern void spl_kmem_free(const void *ptr, size_t sz);
+
+ /*
++ * 5.8 API change, pgprot_t argument removed.
++ */
++#ifdef HAVE_VMALLOC_PAGE_KERNEL
++#define spl_vmalloc(size, flags) __vmalloc(size, flags, PAGE_KERNEL)
++#else
++#define spl_vmalloc(size, flags) __vmalloc(size, flags)
++#endif
++
++/*
+ * The following functions are only available for internal use.
+ */
+ extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
+diff -ur a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
+--- a/module/spl/spl-kmem-cache.c 2020-05-05 12:57:28.442960922 -0400
++++ b/module/spl/spl-kmem-cache.c 2020-08-12 10:38:58.843672014 -0400
+@@ -203,7 +203,7 @@
+ ASSERT(ISP2(size));
+ ptr = (void *)__get_free_pages(lflags, get_order(size));
+ } else {
+- ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
++ ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
+ }
+
+ /* Resulting allocated memory will be page aligned */
+@@ -1242,7 +1242,7 @@
+ * allocation.
+ *
+ * However, this can't be applied to KVM_VMEM due to a bug that
+- * __vmalloc() doesn't honor gfp flags in page table allocation.
++ * spl_vmalloc() doesn't honor gfp flags in page table allocation.
+ */
+ if (!(skc->skc_flags & KMC_VMEM)) {
+ rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
+diff -ur a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
+--- a/module/spl/spl-kmem.c 2020-05-05 12:57:28.442960922 -0400
++++ b/module/spl/spl-kmem.c 2020-08-12 10:38:58.843672014 -0400
+@@ -172,7 +172,7 @@
+ * kmem_zalloc() callers.
+ *
+ * For vmem_alloc() and vmem_zalloc() callers it is permissible
+- * to use __vmalloc(). However, in general use of __vmalloc()
++ * to use spl_vmalloc(). However, in general use of spl_vmalloc()
+ * is strongly discouraged because a global lock must be
+ * acquired. Contention on this lock can significantly
+ * impact performance so frequently manipulating the virtual
+@@ -180,8 +180,7 @@
+ */
+ if ((size > spl_kmem_alloc_max) || use_vmem) {
+ if (flags & KM_VMEM) {
+- ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
+- PAGE_KERNEL);
++ ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
+ } else {
+ return (NULL);
+ }
+@@ -194,7 +193,7 @@
+
+ /*
+ * For vmem_alloc() and vmem_zalloc() callers retry immediately
+- * using __vmalloc() which is unlikely to fail.
++ * using spl_vmalloc() which is unlikely to fail.
+ */
+ if ((flags & KM_VMEM) && (use_vmem == 0)) {
+ use_vmem = 1;
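
The configure check and spl_vmalloc() wrapper above exist because Linux 5.8 dropped the pgprot_t argument from __vmalloc(): on 5.8+ kernels every __vmalloc() allocation implicitly uses PAGE_KERNEL. A minimal sketch of the two prototypes the configure test distinguishes, and of what the wrapper expands to in each case (illustration only, reconstructed from the hunks above):

    /* Kernels before 5.8: the caller supplies the page protection flags. */
    void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);

    /* Linux 5.8 and later: the pgprot_t argument is gone, PAGE_KERNEL is implied. */
    void *__vmalloc(unsigned long size, gfp_t gfp_mask);

    /*
     * The spl_vmalloc() macro from include/spl/sys/kmem.h selects the right
     * form at build time via HAVE_VMALLOC_PAGE_KERNEL, so callers such as
     * spl_kmem_alloc_impl() compile against either kernel unchanged:
     *
     *   ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
     */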