path: root/personal/zfs/patches/linux5.8.patch
diff -ur a/config/kernel.m4 b/config/kernel.m4
--- a/config/kernel.m4	2020-05-05 12:56:29.709370875 -0400
+++ b/config/kernel.m4	2020-08-12 10:38:58.842672016 -0400
@@ -45,6 +45,7 @@
 	ZFS_AC_KERNEL_SRC_SCHED
 	ZFS_AC_KERNEL_SRC_USLEEP_RANGE
 	ZFS_AC_KERNEL_SRC_KMEM_CACHE
+	ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_SRC_WAIT
 	ZFS_AC_KERNEL_SRC_INODE_TIMES
 	ZFS_AC_KERNEL_SRC_INODE_LOCK
@@ -163,6 +164,7 @@
 	ZFS_AC_KERNEL_SCHED
 	ZFS_AC_KERNEL_USLEEP_RANGE
 	ZFS_AC_KERNEL_KMEM_CACHE
+	ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
 	ZFS_AC_KERNEL_WAIT
 	ZFS_AC_KERNEL_INODE_TIMES
 	ZFS_AC_KERNEL_INODE_LOCK
@@ -894,3 +896,28 @@
 	    [test -f build/conftest/conftest.ko],
 	    [$3], [$4], [$5])
 ])
+
+dnl #
+dnl # 5.8 API,
+dnl # __vmalloc PAGE_KERNEL removal
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
+	ZFS_LINUX_TEST_SRC([__vmalloc], [
+		#include <linux/mm.h>
+		#include <linux/vmalloc.h>
+	],[
+		void *p __attribute__ ((unused));
+
+		p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
+	AC_MSG_CHECKING([whether __vmalloc(ptr, flags, pageflags) is available])
+	ZFS_LINUX_TEST_RESULT([__vmalloc], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
+	],[
+		AC_MSG_RESULT(no)
+	])
+])
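
For reference, the signature change this configure check probes for is the Linux 5.8 removal of the pgprot_t argument from __vmalloc(). The conftest above compiles the old three-argument call; on 5.8+ kernels it fails, HAVE_VMALLOC_PAGE_KERNEL stays undefined, and the wrapper below falls back to the two-argument form. A minimal sketch of the two prototypes (shown here for context only, not part of the patch):

/* Linux < 5.8: the caller supplies the page protection bits explicitly. */
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);

/* Linux >= 5.8: PAGE_KERNEL is implied and the argument is gone. */
void *__vmalloc(unsigned long size, gfp_t gfp_mask);
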
diff -ur a/include/spl/sys/kmem.h b/include/spl/sys/kmem.h
--- a/include/spl/sys/kmem.h	2020-05-05 12:56:29.717370676 -0400
+++ b/include/spl/sys/kmem.h	2020-08-12 10:41:20.253453058 -0400
@@ -170,6 +170,15 @@
 extern void spl_kmem_free(const void *ptr, size_t sz);
 
 /*
+ * 5.8 API change, pgprot_t argument removed.
+ */
+#ifdef HAVE_VMALLOC_PAGE_KERNEL
+#define spl_vmalloc(size, flags) __vmalloc(size, flags, PAGE_KERNEL)
+#else
+#define spl_vmalloc(size, flags) __vmalloc(size, flags)
+#endif
+
+/*
  * The following functions are only available for internal use.
  */
 extern void *spl_kmem_alloc_impl(size_t size, int flags, int node);
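With the spl_vmalloc() wrapper in place, SPL callers never mention PAGE_KERNEL directly and look the same on every kernel. A hypothetical caller (illustrative only, not part of the patch) mirroring the flag usage seen in the hunks below:

/*
 * Hypothetical example: allocate a large virtually-contiguous buffer
 * through the wrapper.  Whether __vmalloc() takes two or three
 * arguments is decided once, at configure time, by the
 * HAVE_VMALLOC_PAGE_KERNEL check.
 */
static void *
alloc_big_buffer(size_t len)
{
	return (spl_vmalloc(len, GFP_KERNEL | __GFP_HIGHMEM));
}
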
diff -ur a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c
--- a/module/spl/spl-kmem-cache.c	2020-05-05 12:57:28.442960922 -0400
+++ b/module/spl/spl-kmem-cache.c	2020-08-12 10:38:58.843672014 -0400
@@ -203,7 +203,7 @@
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
 	} else {
-		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
+		ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 	}
 
 	/* Resulting allocated memory will be page aligned */
@@ -1242,7 +1242,7 @@
 	 * allocation.
 	 *
 	 * However, this can't be applied to KVM_VMEM due to a bug that
-	 * __vmalloc() doesn't honor gfp flags in page table allocation.
+	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
 	 */
 	if (!(skc->skc_flags & KMC_VMEM)) {
 		rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
diff -ur a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
--- a/module/spl/spl-kmem.c	2020-05-05 12:57:28.442960922 -0400
+++ b/module/spl/spl-kmem.c	2020-08-12 10:38:58.843672014 -0400
@@ -172,7 +172,7 @@
 		 * kmem_zalloc() callers.
 		 *
 		 * For vmem_alloc() and vmem_zalloc() callers it is permissible
-		 * to use __vmalloc().  However, in general use of __vmalloc()
+		 * to use spl_vmalloc().  However, in general use of spl_vmalloc()
 		 * is strongly discouraged because a global lock must be
 		 * acquired.  Contention on this lock can significantly
 		 * impact performance so frequently manipulating the virtual
@@ -180,8 +180,7 @@
 		 */
 		if ((size > spl_kmem_alloc_max) || use_vmem) {
 			if (flags & KM_VMEM) {
-				ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-				    PAGE_KERNEL);
+				ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);
 			} else {
 				return (NULL);
 			}
@@ -194,7 +193,7 @@
 
 		/*
 		 * For vmem_alloc() and vmem_zalloc() callers retry immediately
-		 * using __vmalloc() which is unlikely to fail.
+		 * using spl_vmalloc() which is unlikely to fail.
 		 */
 		if ((flags & KM_VMEM) && (use_vmem == 0))  {
 			use_vmem = 1;